markdown (stringlengths 0–37k) | code (stringlengths 1–33.3k) | path (stringlengths 8–215) | repo_name (stringlengths 6–77) | license (stringclasses, 15 values) |
|---|---|---|---|---|
Tuning the gamma parameter
|
svm = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
1.5 Implementing a classifier with a scikit-learn decision tree
  If model interpretability is what we care about, decision trees are a useful model: by repeatedly asking questions, the tree walks down different branches and finally arrives at a class.
|
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0)
tree.fit(X_train, y_train)
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined, y_combined, classifier=tree, test_idx=range(105,150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.legend(loc='upper left')
plt.show()
from sklearn.tree import export_graphviz
export_graphviz(tree, out_file='tree.dot', feature_names=['petal length', 'petal width'])
from IPython.display import Image
from io import StringIO  # sklearn.externals.six was removed from recent scikit-learn versions
import pydot
dot_data = StringIO()
export_graphviz(tree, out_file=dot_data, feature_names=['petal length', 'petal width'])
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png())
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
1.6 Implementing a classifier with a random forest
  Random forests are popular because they classify well, scale, and are easy to use. A random forest can be viewed as an ensemble of decision trees: weak learners are combined into a more robust model, which reduces the overfitting of a single decision tree. The random forest algorithm can be summarized in 4 steps:
* Draw n samples from the training set with resampling (bootstrap)
* Assuming each sample has a features, pick k of the a features for the n samples and grow a decision tree by finding the best split
* Repeat m times to obtain m decision trees
* Predict by majority vote
  The advantage of random forests is that there is little hyperparameter tuning to worry about and the model is robust enough that no pruning is needed; the main choice is the number of trees k. A larger k performs better but costs more to compute.
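As an illustration only (not the notebook's code), here is a minimal sketch of those 4 steps using a bootstrap sample per tree and a majority vote; the sklearn RandomForestClassifier used in the next cell does the same thing far more efficiently:
import numpy as np
from sklearn.tree import DecisionTreeClassifier
def simple_forest_predict(X_tr, y_tr, X_te, m=10, k='sqrt', seed=0):
    rng = np.random.RandomState(seed)
    votes = []
    for _ in range(m):  # step 3: repeat m times to grow m trees
        idx = rng.choice(len(X_tr), size=len(X_tr), replace=True)  # step 1: resample n samples
        tree = DecisionTreeClassifier(max_features=k, random_state=seed)  # step 2: consider k of the a features at each split
        votes.append(tree.fit(X_tr[idx], y_tr[idx]).predict(X_te))
    votes = np.array(votes).astype(int)  # assumes integer class labels
    # step 4: majority vote across the m trees
    return np.apply_along_axis(lambda col: np.bincount(col).argmax(), 0, votes)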
|
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2)
forest.fit(X_train, y_train)
plot_decision_regions(X_combined, y_combined, classifier=forest, test_idx=range(105,150))
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.legend(loc='upper left')
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
1.7 Implementing a classifier with k-nearest neighbors
  K-nearest neighbors (KNN) is a supervised learning algorithm and a typical example of lazy learning: it memorizes the training set instead of learning a discriminant function from it.
Parametric and non-parametric models
  Machine learning algorithms can be divided into parametric and non-parametric models. A parametric model estimates its parameters from the training set and yields a function that can classify new data without the original data; typical examples are the perceptron, logistic regression, and the linear SVM. A non-parametric model cannot be characterized by a fixed set of parameters, and the number of parameters grows with the training data; typical examples are decision trees, random forests, kernel SVMs, and k-nearest neighbors.
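A small illustration of the parametric side of this distinction (toy random data, not part of the original notebook): no matter how many samples a logistic regression sees, its coefficient vector keeps the same size, whereas a KNN classifier must keep the whole training set around.
import numpy as np
from sklearn.linear_model import LogisticRegression
rng = np.random.RandomState(0)
for n in (100, 10000):
    Xs, ys = rng.randn(n, 2), rng.randint(0, 2, n)
    print(n, LogisticRegression().fit(Xs, ys).coef_.shape)  # stays (1, 2): a fixed number of parameters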
|
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
knn.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined, classifier=knn, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
Reducing overfitting
* Collect more data
* Introduce a complexity penalty through regularization (L1 regularization)
* Build a simpler model with fewer parameters
* Reduce dimensionality (sequential feature selection)
Using L1 regularization
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split  # sklearn.cross_validation in older scikit-learn versions
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium',
'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline']
print('Class labels', np.unique(df_wine['Class label']))
df_wine.head()
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
lr = LogisticRegression()
lr.fit(X_train_std, y_train)
print('Training accuracy:', lr.score(X_train_std, y_train))
print('Test accuracy:', lr.score(X_test_std, y_test))
lr1 = LogisticRegression(penalty='l1', C=5, solver='liblinear')  # liblinear supports the L1 penalty
lr1.fit(X_train_std, y_train)
print('Training accuracy (with L1 regularization):', lr1.score(X_train_std, y_train))
print('Test accuracy (with L1 regularization):', lr1.score(X_test_std, y_test))
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
Sequential feature selection with SBS (sequential backward selection)
|
from sklearn.base import clone
from itertools import combinations
import numpy as np
from sklearn.model_selection import train_test_split  # sklearn.cross_validation in older scikit-learn versions
from sklearn.metrics import accuracy_score
class SBS():
    def __init__(self, estimator, k_features, scoring=accuracy_score, test_size=0.25, random_state=1):
        self.scoring = scoring
        self.estimator = clone(estimator)
        self.k_features = k_features
        self.test_size = test_size
        self.random_state = random_state

    def fit(self, X, y):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=self.random_state)
        dim = X_train.shape[1]
        self.indices_ = tuple(range(dim))
        self.subsets_ = [self.indices_]
        score = self._calc_score(X_train, y_train, X_test, y_test, self.indices_)
        self.scores_ = [score]
        while dim > self.k_features:
            scores = []
            subsets = []
            for p in combinations(self.indices_, r=dim-1):
                score = self._calc_score(X_train, y_train, X_test, y_test, p)
                scores.append(score)
                subsets.append(p)
            best = np.argmax(scores)
            self.indices_ = subsets[best]
            self.subsets_.append(self.indices_)
            dim -= 1
            self.scores_.append(scores[best])
        self.k_score_ = self.scores_[-1]
        return self

    def transform(self, X):
        return X[:, self.indices_]

    def _calc_score(self, X_train, y_train, X_test, y_test, indices):
        self.estimator.fit(X_train[:, indices], y_train)
        y_pred = self.estimator.predict(X_test[:, indices])
        score = self.scoring(y_test, y_pred)
        return score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
knn = KNeighborsClassifier(n_neighbors=2)
sbs = SBS(knn, k_features=1)
sbs.fit(X_train_std, y_train)
k_feat = [len(k) for k in sbs.subsets_]
plt.plot(k_feat, sbs.scores_, marker='o')
plt.ylim([0.7, 1.1])
plt.ylabel('Accuracy')
plt.xlabel('Number of features')
plt.grid()
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
We pick 5 features and check whether this brings an improvement; the results show that with fewer attributes, the test-set accuracy increases by 2%.
|
k5 = list(sbs.subsets_[8])
print(df_wine.columns[1:][k5])
knn.fit(X_train_std, y_train)
print('Training accuracy:', knn.score(X_train_std, y_train))
print('Test accuracy:', knn.score(X_test_std, y_test))
knn.fit(X_train_std[:, k5], y_train)
print('Training accuracy(select 5):', knn.score(X_train_std[:, k5], y_train))
print('Test accuracy(select 5):', knn.score(X_test_std[:, k5], y_test))
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
Assessing feature importance with a random forest
  Earlier we used L1 regularization to remove irrelevant features and the SBS algorithm to select features. Another way to select relevant features is to use a random forest.
|
from sklearn.ensemble import RandomForestClassifier
feat_labels = df_wine.columns[1:]
forest = RandomForestClassifier(n_estimators=10000, random_state=0, n_jobs=-1)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
    # index the labels through `indices` so they match the sorted importances
    print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))
plt.title('Feature Importances')
plt.bar(range(X_train.shape[1]), importances[indices], color='lightblue', align='center')
plt.xticks(range(X_train.shape[1]), feat_labels, rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
2.1 Compressing data via dimensionality reduction
Principal component analysis (PCA): unsupervised data compression
Linear discriminant analysis (LDA): supervised dimensionality reduction
Kernel principal component analysis (KPCA): dimensionality reduction for nonlinear data (a short sketch of the sklearn entry points for these two follows this list)
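As a hedged aside (not part of the original notebook), the sklearn entry points for the other two techniques look like this; the standardized wine features and labels used below would slot in via fit_transform in the same way as for PCA.
# Sketch only: constructors for the other two dimensionality-reduction methods
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.decomposition import KernelPCA
lda = LinearDiscriminantAnalysis(n_components=2)  # supervised: lda.fit_transform(X_std, y)
kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)  # nonlinear: kpca.fit_transform(X_std)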
Sklearn PCA
|
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split  # sklearn.cross_validation in older scikit-learn versions
from sklearn.preprocessing import StandardScaler
def plot_decision_regions(X, y, classifier, resolution=0.02):
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)  # use the scaler fitted on the training data only
pca = PCA(n_components=2)
lr = LogisticRegression()
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
lr.fit(X_train_pca, y_train)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC1 train')
plt.ylabel('PC2 train')
plt.legend(loc='lower left')
plt.show()
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC1 test')
plt.ylabel('PC2 test')
plt.legend(loc='lower left')
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
Build the network
For the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project.
Note: If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction of layers, so it's easy to pick up.
However, if you would like to get the most out of this course, try to solve all the problems without using anything from the TF Layers packages. You can still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the conv2d class, tf.layers.conv2d, you would want to use the TF Neural Network version of conv2d, tf.nn.conv2d.
Let's begin!
Input
The neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions
* Implement neural_net_image_input
* Return a TF Placeholder
* Set the shape using image_shape with batch size set to None.
* Name the TensorFlow placeholder "x" using the TensorFlow name parameter in the TF Placeholder.
* Implement neural_net_label_input
* Return a TF Placeholder
* Set the shape using n_classes with batch size set to None.
* Name the TensorFlow placeholder "y" using the TensorFlow name parameter in the TF Placeholder.
* Implement neural_net_keep_prob_input
* Return a TF Placeholder for dropout keep probability.
* Name the TensorFlow placeholder "keep_prob" using the TensorFlow name parameter in the TF Placeholder.
These names will be used at the end of the project to load your saved model.
Note: None for shapes in TensorFlow allow for a dynamic size.
|
import tensorflow as tf
def neural_net_image_input(image_shape):
    """
    Return a Tensor for a batch of image input
    : image_shape: Shape of the images
    : return: Tensor for image input.
    """
    # TODO: Implement Function
    return None

def neural_net_label_input(n_classes):
    """
    Return a Tensor for a batch of label input
    : n_classes: Number of classes
    : return: Tensor for label input.
    """
    # TODO: Implement Function
    return None

def neural_net_keep_prob_input():
    """
    Return a Tensor for keep probability
    : return: Tensor for keep probability.
    """
    # TODO: Implement Function
    return None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
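One way the three stubs above could be completed, as a hedged sketch using TF 1.x tf.placeholder (not the project's official solution; image_shape is assumed to be a tuple such as (32, 32, 3)):
def neural_net_image_input(image_shape):
    # None in the first dimension leaves the batch size dynamic
    return tf.placeholder(tf.float32, shape=[None, *image_shape], name="x")
def neural_net_label_input(n_classes):
    return tf.placeholder(tf.float32, shape=[None, n_classes], name="y")
def neural_net_keep_prob_input():
    return tf.placeholder(tf.float32, name="keep_prob")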
|
project2/files/dlnd_image_classification_instruction.ipynb
|
myfunprograms/deep_learning
|
gpl-3.0
|
Import packages
|
# Import
from __future__ import absolute_import, division, print_function
import calendar
import hashlib
import json
import math
import os
import random
import time
import uuid
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from google.cloud import bigquery
from googleads import adwords
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Authenticate your GCP account
If you are using AI Platform Notebooks, you are already authenticated so there is no need to run this step.
|
import sys
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Analyze dataset
Some charts might use a log scale.
Quantity
This section shows how to use the BigQuery ML BUCKETIZE preprocessing function to create buckets of data for quantity and display a log-scaled distribution of the qty field.
|
%%bigquery df_histo_qty --project $PROJECT_ID
WITH
min_max AS (
SELECT
MIN(qty) min_qty,
MAX(qty) max_qty,
CEIL((MAX(qty) - MIN(qty)) / 100) step
FROM
`ltv_ecommerce.10_orders`
)
SELECT
COUNT(1) c,
bucket_same_size AS bucket
FROM (
SELECT
-- Creates (1000-100)/100 + 1 buckets of data.
ML.BUCKETIZE(qty, GENERATE_ARRAY(min_qty, max_qty, step)) AS bucket_same_size,
-- Creates custom ranges.
ML.BUCKETIZE(qty, [-1, -1, -2, -3, -4, -5, 0, 1, 2, 3, 4, 5]) AS bucket_specific,
FROM
`ltv_ecommerce.10_orders`, min_max )
# WHERE bucket != "bin_1" and bucket != "bin_2"
GROUP BY
bucket
-- Otherwise, orders bin_10 before bin_2
ORDER BY CAST(SPLIT(bucket, "_")[OFFSET(1)] AS INT64)
# Uses a log scale for bucket_same_size.
# Can remove the log scale when using bucket_specific.
plt.figure(figsize=(12, 5))
plt.title("Log scaled distribution for qty")
hqty = sns.barplot(x="bucket", y="c", data=df_histo_qty)
hqty.set_yscale("log")
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Unit price
|
%%bigquery df_histo_unit_price --project $PROJECT_ID
WITH
min_max AS (
SELECT
MIN(unit_price) min_unit_price,
MAX(unit_price) max_unit_price,
CEIL((MAX(unit_price) - MIN(unit_price)) / 10) step
FROM
`ltv_ecommerce.10_orders`
)
SELECT
COUNT(1) c,
bucket_same_size AS bucket
FROM (
SELECT
-- Creates (1000-100)/100 + 1 buckets of data.
ML.BUCKETIZE(unit_price, GENERATE_ARRAY(min_unit_price, max_unit_price, step)) AS bucket_same_size,
-- Creates custom ranges.
ML.BUCKETIZE(unit_price, [10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000]) AS bucket_specific,
FROM
`ltv_ecommerce.10_orders`, min_max )
# WHERE bucket != "bin_1" and bucket != "bin_2"
GROUP BY
bucket
-- Otherwise, orders bin_10 before bin_2
ORDER BY CAST(SPLIT(bucket, "_")[OFFSET(1)] AS INT64)
# Uses a log scale for bucket_same_size.
# Can remove the log scale when using bucket_specific.
plt.figure(figsize=(12, 5))
q = sns.barplot(x="bucket", y="c", data=df_histo_unit_price)
q.set_yscale("log")
plt.title("Log scaled distribution for unit_price")
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Set parameters for LTV
Some parameters useful to run some of the queries in this tutorial:
WINDOW_STEP: How many days between threshold dates.
WINDOW_STEP_INITIAL: How many days between the first order and the first threshold date. A threshold date is when BigQuery computes inputs and targets.
WINDOW_LENGTH: How many days back to use for input transactions. The default value is 0, which means that this tutorial takes all transactions before the threshold date.
LENGTH_FUTURE: How far in the future to predict the monetary value. At every threshold date, BigQuery calculates the target value for all orders that happen LENGTH_FUTURE days after the threshold date.
MAX_STDV_MONETARY: Standard deviation of the monetary value per customer. Removes orders per customer that have order values with a greater standard deviation.
MAX_STDV_QTY: Standard deviation of the quantity of products per customer. Removes orders per customer that have product quantity with a greater standard deviation.
TOP_LTV_RATIO: Percentage of top customers that you want to keep for your lookalike activation.
You can change those parameters to see how they impact the model, especially the parameters related to the window. There is no obvious rule for setting these values; they depend on what the data looks like.
For example:
- If your customers buy multiple times a week, you could try to predict monetary value on a weekly basis.
- If you have a lot of data, you can create more windows by decreasing their sizes and possibly the number of days between threshold dates.
After multiple trials, this tutorial settled on values that provide a decent result for the example dataset.
|
LTV_PARAMS = {
"WINDOW_LENGTH": 0,
"WINDOW_STEP": 30,
"WINDOW_STEP_INITIAL": 90,
"LENGTH_FUTURE": 30,
"MAX_STDV_MONETARY": 500,
"MAX_STDV_QTY": 100,
"TOP_LTV_RATIO": 0.2,
}
LTV_PARAMS
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Check distributions
This tutorial does minimum data cleansing and focuses mostly on transforming a list of transactions into workable inputs for the model.
This section checks that data is generally usable.
Per date
|
%%bigquery df_dist_dates --project $PROJECT_ID
SELECT count(1) c, SUBSTR(CAST(order_day AS STRING), 0, 7) as yyyy_mm
FROM `ltv_ecommerce.20_aggred`
WHERE qty_articles > 0
GROUP BY yyyy_mm
ORDER BY yyyy_mm
plt.figure(figsize=(12, 5))
sns.barplot(x="yyyy_mm", y="c", data=df_dist_dates)
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Orders are quite well distributed across the year despite a lower number in the early days of the dataset. You can keep this in mind when choosing a value for WINDOW_STEP_INITIAL.
Per customer
|
%%bigquery df_dist_customers --params $LTV_PARAMS --project $PROJECT_ID
SELECT customer_id, count(1) c
FROM `ltv_ecommerce.20_aggred`
GROUP BY customer_id
plt.figure(figsize=(12, 4))
sns.distplot(df_dist_customers["c"], hist_kws=dict(ec="k"), kde=False)
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
The number of transactions per customer is distributed across a few discrete values with no clear outliers.
Per quantity
This section looks at the general distribution of the number of articles per order and checks whether there are outliers.
|
%%bigquery df_dist_qty --params $LTV_PARAMS --project $PROJECT_ID
SELECT qty_articles, count(1) c
FROM `ltv_ecommerce.20_aggred`
GROUP BY qty_articles
plt.figure(figsize=(12, 4))
sns.distplot(df_dist_qty["qty_articles"], hist_kws=dict(ec="k"), kde=False)
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Dataset
|
%%bigquery --project $PROJECT_ID
-- Shows all data for a specific customer and some other random records.
SELECT * FROM `ltv_ecommerce.30_featured` WHERE customer_id = "10"
UNION ALL
(SELECT * FROM `ltv_ecommerce.30_featured` LIMIT 5)
ORDER BY customer_id, frequency, T
%%bigquery df_featured --project $PROJECT_ID
ltv_ecommerce.30_featured
df_featured.describe()
# Display distribution for all columns that are numerical (but will still ignore the categorical ones like day of the week)
valid_column_names = [
key
for key in dict(df_featured.dtypes)
if dict(df_featured.dtypes)[key] in ["float64", "int64"]
]
NUM_COLS = 5
NUM_ROWS = math.ceil(int(len(valid_column_names)) / NUM_COLS)
fig, axs = plt.subplots(nrows=NUM_ROWS, ncols=NUM_COLS, figsize=(25, 7))
for idx, cname in enumerate(valid_column_names):
    x = int(idx / NUM_COLS)
    y = idx % NUM_COLS
    sns.violinplot(df_featured[cname], ax=axs[x, y], label=cname)
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Seems like for most values, there is a long tail of records. This is something that might require additional feature preparation even though AutoML already provides some automatic feature engineering. You can investigate this if you want to improve the base model.
Train the model
This tutorial uses an AutoML regressor to predict the continuous value of target_monetary.
With a non-AutoML model, you would generally need to:
1. Apply common ML patterns such as normalization or clipping.
1. Split data into two or three datasets for training, evaluation, and testing.
AutoML lets you split your data:
- Manually, using a column with a name for each split.
- Manually, using a column that defines a time.
- Automatically.
This tutorial uses the last option, where AutoML automatically assigns each row to a split.
|
# You can run this query using the magic cell but the cell would run for hours.
# Although stopping the cell would not stop the query, using the Python client
# also enables you to add a custom parameter for the model name.
suffix_now = datetime.now().strftime("%Y%m%d_%H%M%S")
train_model_jobid = f"train_model_{suffix_now}"
train_model_sql = f"""
CREATE OR REPLACE MODEL `ltv_ecommerce.model_tutorial_{suffix_now}`
OPTIONS(MODEL_TYPE="AUTOML_REGRESSOR",
INPUT_LABEL_COLS=["target_monetary"],
OPTIMIZATION_OBJECTIVE="MINIMIZE_MAE")
AS SELECT
* EXCEPT(customer_id)
FROM
`ltv_ecommerce.30_featured`
"""
bq_client.query(train_model_sql, job_id=train_model_jobid)
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
This is an example of a model evaluation
Predict LTV
Predicts LTV for all customers. It uses the overall monetary value for each customer to predict a future one.
|
%%bigquery --params $LTV_PARAMS --project $PROJECT_ID
-- TODO(developer):
-- 1. Update the model name to the one you want to use.
-- 2. Update the table where to output predictions.
-- How many days back for inputs transactions. 0 means from the start.
DECLARE WINDOW_LENGTH INT64 DEFAULT @WINDOW_LENGTH;
-- Date at which an input transactions window starts.
DECLARE WINDOW_START DATE;
-- Date of the first transaction in the dataset.
DECLARE MIN_DATE DATE;
-- Date of the final transaction in the dataset.
DECLARE MAX_DATE DATE;
-- Date from which you want to predict.
DECLARE PREDICT_FROM_DATE DATE;
SET (MIN_DATE, MAX_DATE) = (
SELECT AS STRUCT
MIN(order_day) AS min_days,
MAX(order_day) AS max_days
FROM
`ltv_ecommerce.20_aggred`
);
-- You can set any date here. In production, it is generally today.
SET PREDICT_FROM_DATE = MAX_DATE;
IF WINDOW_LENGTH != 0 THEN
SET WINDOW_START = DATE_SUB(PREDICT_FROM_DATE, INTERVAL WINDOW_LENGTH DAY);
ELSE
SET WINDOW_START = MIN_DATE;
END IF;
CREATE OR REPLACE TABLE `ltv_ecommerce.predictions_tutorial`
AS (
SELECT
customer_id,
monetary AS monetary_so_far,
ROUND(predicted_target_monetary, 2) AS monetary_predicted,
ROUND(predicted_target_monetary - monetary, 2) AS monetary_future
FROM
ML.PREDICT(
-- /!\ Set your model name here.
MODEL ltv_ecommerce.model_tutorial_YYYYMMDD,
(
SELECT
customer_id,
ROUND(monetary_orders, 2) AS monetary,
cnt_orders AS frequency,
recency,
T,
ROUND(recency/cnt_orders, 2) AS time_between,
ROUND(avg_basket_value, 2) AS avg_basket_value,
ROUND(avg_basket_size, 2) AS avg_basket_size,
has_returns,
CEIL(avg_time_to_return) AS avg_time_to_return,
num_returns
FROM (
SELECT
customer_id,
SUM(value) AS monetary_orders,
DATE_DIFF(MAX(order_day), MIN(order_day), DAY) AS recency,
DATE_DIFF(PREDICT_FROM_DATE, MIN(order_day), DAY) AS T,
COUNT(DISTINCT order_day) AS cnt_orders,
AVG(qty_articles) avg_basket_size,
AVG(value) avg_basket_value,
CASE
WHEN SUM(num_returns) > 0 THEN 'y'
ELSE 'n'
END AS has_returns,
AVG(time_to_return) avg_time_to_return,
SUM(num_returns) num_returns,
FROM
`ltv_ecommerce.20_aggred`
WHERE
order_day <= PREDICT_FROM_DATE AND
order_day >= WINDOW_START
GROUP BY
customer_id
)
)
)
)
%%bigquery df_predictions --project $PROJECT_ID
ltv_ecommerce.predictions_windowed
df_predictions.describe()
from matplotlib.gridspec import GridSpec
fig = plt.figure(constrained_layout=True, figsize=(15, 5))
gs = GridSpec(2, 2, figure=fig)
sns.set(font_scale=1)
plt.tick_params(axis="x", labelsize=14)
ax0 = plt.subplot(gs.new_subplotspec((0, 0), colspan=1))
ax1 = plt.subplot(gs.new_subplotspec((0, 1), colspan=1))
ax2 = plt.subplot(gs.new_subplotspec((1, 0), colspan=2))
sns.violinplot(df_predictions["monetary_so_far"], ax=ax0, label="monetary_so_far")
sns.violinplot(df_predictions["monetary_predicted"], ax=ax1, label="monetary_predicted")
sns.violinplot(df_predictions["monetary_future"], ax=ax2, label="monetary_future")
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
The monetary distribution analysis shows small monetary amounts for the next month compared to the overall historical value. The difference is about 3 to 4 orders of magnitude.
One reason is that the model is trained to predict the value for the next month (LENGTH_FUTURE = 30).
You can play with that value to train and predict for the next quarter instead, for example (LENGTH_FUTURE = 90).
Activation
This part shows how to activate on Google Ads using similar audiences.
You can follow a similar process for Facebook, for example.
Extract top customers
This step extracts the top 20% of customers with the highest future monetary value and joins them with a CRM table to get their emails.
The prediction used the overall monetary value, but in this use case we are interested in the customers who are most valuable in the future. You can modify the PERCENT_RANK to use another KPI.
|
%%bigquery df_top_ltv --params $LTV_PARAMS --project $PROJECT_ID
DECLARE TOP_LTV_RATIO FLOAT64 DEFAULT @TOP_LTV_RATIO;
SELECT
p.customer_id,
monetary_future,
c.email AS email
FROM (
SELECT
customer_id,
monetary_future,
PERCENT_RANK() OVER (ORDER BY monetary_future DESC) AS percent_rank_monetary
FROM
`ltv_ecommerce.predictions_windowed` ) p
-- This creates fake emails. You need to join with your own CRM table.
INNER JOIN (
SELECT
customer_id,
email
FROM
`ltv_ecommerce.00_crm` ) c
ON
p.customer_id = CAST(c.customer_id AS STRING)
WHERE
-- Decides the size of your list of emails. For similar-audience use cases
-- where you need to find a minimum of matching emails, 20% should provide
-- enough potential emails.
percent_rank_monetary <= TOP_LTV_RATIO
ORDER BY monetary_future DESC
df_top_ltv.head(5)
# Shows distribution of the predicted monetary value for the top LTV customers.
print(df_top_ltv.describe())
fig, axs = plt.subplots()
sns.set(font_scale=1.2)
sns.distplot(df_top_ltv["monetary_future"])
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Setup Adwords client
Creates the configuration YAML file for the Google Ads client. You need to:
1. Create Client ID and Secret using the Cloud Console
2. Follow these steps
|
# Sets your variables.
if "google.colab" in sys.modules:
from google.colab import files
ADWORDS_FILE = "/tmp/adwords.yaml"
DEVELOPER_TOKEN = "[YOUR_DEVELOPER_TOKEN]"
OAUTH_2_CLIENT_ID = "[YOUR_OAUTH_2_CLIENT_ID]"
CLIENT_SECRET = "[YOUR_CLIENT_SECRET]"
REFRESH_TOKEN = "[YOUR_REFRESH_TOKEN]"
# Creates a local YAML file
adwords_content = f"""
# AdWordsClient configurations
adwords:
#############################################################################
# Required Fields #
#############################################################################
developer_token: {DEVELOPER_TOKEN}
#############################################################################
# Optional Fields #
#############################################################################
# client_customer_id: INSERT_CLIENT_CUSTOMER_ID_HERE
# user_agent: INSERT_USER_AGENT_HERE
# partial_failure: True
# validate_only: True
#############################################################################
# OAuth2 Configuration #
# Below you may provide credentials for either the installed application or #
# service account flows. Remove or comment the lines for the flow you're #
# not using. #
#############################################################################
# The following values configure the client for the installed application
# flow.
client_id: {OAUTH_2_CLIENT_ID}
client_secret: {CLIENT_SECRET}
refresh_token: {REFRESH_TOKEN}
# The following values configure the client for the service account flow.
# path_to_private_key_file: INSERT_PATH_TO_JSON_KEY_FILE_HERE
# delegated_account: INSERT_DOMAIN_WIDE_DELEGATION_ACCOUNT
#############################################################################
# ReportDownloader Headers #
# Below you may specify boolean values for optional headers that will be #
# applied to all requests made by the ReportDownloader utility by default. #
#############################################################################
# report_downloader_headers:
# skip_report_header: False
# skip_column_header: False
# skip_report_summary: False
# use_raw_enum_values: False
# AdManagerClient configurations
ad_manager:
#############################################################################
# Required Fields #
#############################################################################
application_name: INSERT_APPLICATION_NAME_HERE
#############################################################################
# Optional Fields #
#############################################################################
# The network_code is required for all services except NetworkService:
# network_code: INSERT_NETWORK_CODE_HERE
# delegated_account: INSERT_DOMAIN_WIDE_DELEGATION_ACCOUNT
#############################################################################
# OAuth2 Configuration #
# Below you may provide credentials for either the installed application or #
# service account (recommended) flows. Remove or comment the lines for the #
# flow you're not using. #
#############################################################################
# The following values configure the client for the service account flow.
path_to_private_key_file: INSERT_PATH_TO_JSON_KEY_FILE_HERE
# delegated_account: INSERT_DOMAIN_WIDE_DELEGATION_ACCOUNT
# The following values configure the client for the installed application
# flow.
# client_id: INSERT_OAUTH_2_CLIENT_ID_HERE
# client_secret: INSERT_CLIENT_SECRET_HERE
# refresh_token: INSERT_REFRESH_TOKEN_HERE
# Common configurations:
###############################################################################
# Compression (optional) #
# Below you may specify whether to accept and automatically decompress gzip #
# encoded SOAP requests. By default, gzip compression is not enabled. #
###############################################################################
# enable_compression: False
###############################################################################
# Logging configuration (optional) #
# Below you may specify the logging configuration. This will be provided as #
# an input to logging.config.dictConfig. #
###############################################################################
# logging:
# version: 1
# disable_existing_loggers: False
# formatters:
# default_fmt:
# format: ext://googleads.util.LOGGER_FORMAT
# handlers:
# default_handler:
# class: logging.StreamHandler
# formatter: default_fmt
# level: INFO
# loggers:
# Configure root logger
# "":
# handlers: [default_handler]
# level: INFO
###############################################################################
# Proxy configurations (optional) #
# Below you may specify an HTTP or HTTPS Proxy to be used when making API #
# requests. Note: You must specify the scheme used for the proxy endpoint. #
# #
# For additional information on configuring these values, see: #
# http://docs.python-requests.org/en/master/user/advanced/#proxies #
###############################################################################
# proxy_config:
# http: INSERT_HTTP_PROXY_URI_HERE
# https: INSERT_HTTPS_PROXY_URI_HERE
# If specified, the given cafile will only be used if certificate validation
# is not disabled.
# cafile: INSERT_PATH_HERE
# disable_certificate_validation: False
################################################################################
# Utilities Included (optional) #
# Below you may specify whether the library will include utilities used in the #
# user agent. By default, the library will include utilities used in the user #
# agent. #
################################################################################
# include_utilities_in_user_agent: True
################################################################################
# Custom HTTP headers (optional) #
# Specify one or more custom headers to pass along with all requests to #
# the API. #
################################################################################
# custom_http_headers:
# X-My-Header: 'content'
"""
with open(ADWORDS_FILE, "w") as adwords_file:
    print(adwords_content, file=adwords_file)
# Google Ads client
# adwords_client = adwords.AdWordsClient.LoadFromStorage(ADWORDS_FILE)
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
Create an AdWords user list
Using the emails of the top LTV customers, you create an AdWords list. If more than 5,000 of the users are matched with AdWords emails, a similar audiences list will be created.
Note that this guide uses fake emails, so running these steps as-is is not going to work, but you can reuse this code with emails coming from your CRM.
|
ltv_emails = list(set(df_top_ltv["email"]))
# https://developers.google.com/adwords/api/docs/samples/python/remarketing#create-and-populate-a-user-list
# https://github.com/googleads/googleads-python-lib/blob/7c41584c65759b6860572a13bde65d7395c5b2d8/examples/adwords/v201809/remarketing/add_crm_based_user_list.py
# """Adds a user list and populates it with hashed email addresses.
# Note: It may take several hours for the list to be populated with members. Email
# addresses must be associated with a Google account. For privacy purposes, the
# user list size will show as zero until the list has at least 1000 members. After
# that, the size will be rounded to the two most significant digits.
# """
# def normalize_and_SHA256(s):
# """Normalizes (lowercase, remove whitespace) and hashes a string with SHA-256.
# Args:
# s: The string to perform this operation on.
# Returns:
# A normalized and SHA-256 hashed string.
# """
# return hashlib.sha256(s.strip().lower()).hexdigest()
# def create_user_list(client):
# # Initialize appropriate services.
# user_list_service = client.GetService('AdwordsUserListService', 'v201809')
# user_list = {
# 'xsi_type': 'CrmBasedUserList',
# 'name': f'Customer relationship management list #{uuid.uuid4()}',
# 'description': 'A list of customers that originated from email addresses',
# # CRM-based user lists can use a membershipLifeSpan of 10000 to indicate
# # unlimited; otherwise normal values apply.
# 'membershipLifeSpan': 30,
# 'uploadKeyType': 'CONTACT_INFO'
# }
# # Create an operation to add the user list.
# operations = [{
# 'operator': 'ADD',
# 'operand': user_list
# }]
# result = user_list_service.mutate(operations)
# user_list_id = result['value'][0]['id']
# emails = ltv_emails
# members = [{'hashedEmail': normalize_and_SHA256(email)} for email in emails]
# mutate_members_operation = {
# 'operand': {
# 'userListId': user_list_id,
# 'membersList': members
# },
# 'operator': 'ADD'
# }
# response = user_list_service.mutateMembers([mutate_members_operation])
# if 'userLists' in response:
# for user_list in response['userLists']:
# print('User list with name "%s" and ID "%d" was added.'
# % (user_list['name'], user_list['id']))
# create_user_list(adwords_client)
|
notebooks/community/analytics-componetized-patterns/retail/ltv/bqml/notebooks/bqml_automl_ltv_activate_lookalike.ipynb
|
GoogleCloudPlatform/bigquery-notebooks
|
apache-2.0
|
SWAP
|
swap()
|
examples/quantum-gates.ipynb
|
ajgpitch/qutip-notebooks
|
lgpl-3.0
|
ISWAP
|
iswap()
|
examples/quantum-gates.ipynb
|
ajgpitch/qutip-notebooks
|
lgpl-3.0
|
From QuTiP 4.4, we can also add a gate at an arbitrary position in a circuit.
|
qc1.add_gate("CSIGN", index=1)
qc1.png
|
examples/quantum-gates.ipynb
|
ajgpitch/qutip-notebooks
|
lgpl-3.0
|
Adding a gate in the middle of a circuit
From QuTiP 4.4 one can add a gate at an arbitrary position in a circuit. All one needs to do is specify the parameter index. With this, we can also add the same gate at multiple positions at the same time.
|
qc = QubitCircuit(1)
qc.add_gate("RX", targets=1)
qc.add_gate("RX", targets=1)
qc.add_gate("RY", targets=1, index=[1,0])
qc.gates
|
examples/quantum-gates.ipynb
|
ajgpitch/qutip-notebooks
|
lgpl-3.0
|
If
If statements can be used to execute some lines or blocks of code if a particular condition is satisfied. E.g., let's print something based on the entries in the list.
|
for instructor in instructors:
    if not "Clown" in instructor:
        print(instructor)
    else:
        pass
for instructor in instructors:
    if "Clown" in instructor:
        pass
    else:
        print(instructor)
for instructor in instructors:
    if not "Clown" in instructor:
        print(instructor)
    elif not "z" in instructor:
        print(instructor)
    else:
        pass
if 'Dorkus the Clown' in instructors:
    print('#fakeinstructor')
|
week_1/procedural_python/flow_of_control.ipynb
|
UWSEDS/LectureNotes
|
bsd-2-clause
|
You can combine loops and conditionals:
|
for instructor in instructors:
    if instructor.endswith('Clown'):
        print(instructor + " doesn't sound like a real instructor name!")
    else:
        print(instructor + " is so smart... all those gooey brains!")
# Loops can be nested
for i in range(1, 4):
    for j in range(1, 4):
        print('%d * %d = %d' % (i, j, i*j))  # Note string formatting here, %d means an integer
# Can exit a loop early if a condition is met
for i in range(10):
    if i == 4:
        break
|
week_1/procedural_python/flow_of_control.ipynb
|
UWSEDS/LectureNotes
|
bsd-2-clause
|
Programming Example
Write a script that finds the first N prime numbers.
|
4 % 2
N = 100
for candidate in range(2, N):
    # candidate is the number being tested. Check whether it is prime.
    is_prime = True
    for m in range(2, candidate):
        if (candidate % m) == 0:
            is_prime = False
            break
    if is_prime:
        print("%d is prime!" % candidate)
|
week_1/procedural_python/flow_of_control.ipynb
|
UWSEDS/LectureNotes
|
bsd-2-clause
|
With the model loaded, you can process text like this:
|
doc = nlp("Tea is healthy and calming, don't you think?")
|
notebooks/nlp/raw/tut1.ipynb
|
Kaggle/learntools
|
apache-2.0
|
There's a lot you can do with the doc object you just created.
Tokenizing
This returns a document object that contains tokens. A token is a unit of text in the document, such as individual words and punctuation. SpaCy splits contractions like "don't" into two tokens, "do" and "n't". You can see the tokens by iterating through the document.
|
for token in doc:
    print(token)
|
notebooks/nlp/raw/tut1.ipynb
|
Kaggle/learntools
|
apache-2.0
|
Iterating through a document gives you token objects. Each of these tokens comes with additional information. In most cases, the important ones are token.lemma_ and token.is_stop.
Text preprocessing
There are a few types of preprocessing to improve how we model with words. The first is "lemmatizing."
The "lemma" of a word is its base form. For example, "walk" is the lemma of the word "walking". So, when you lemmatize the word walking, you would convert it to walk.
It's also common to remove stopwords. Stopwords are words that occur frequently in the language and don't contain much information. English stopwords include "the", "is", "and", "but", "not".
With a spaCy token, token.lemma_ returns the lemma, while token.is_stop returns a boolean True if the token is a stopword (and False otherwise).
|
print(f"Token \t\tLemma \t\tStopword".format('Token', 'Lemma', 'Stopword'))
print("-"*40)
for token in doc:
print(f"{str(token)}\t\t{token.lemma_}\t\t{token.is_stop}")
|
notebooks/nlp/raw/tut1.ipynb
|
Kaggle/learntools
|
apache-2.0
|
Why are lemmas and identifying stopwords important? Language data has a lot of noise mixed in with informative content. In the sentence above, the important words are tea, healthy and calming. Removing stop words might help the predictive model focus on relevant words. Lemmatizing similarly helps by combining multiple forms of the same word into one base form ("calming", "calms", "calmed" would all change to "calm").
However, lemmatizing and dropping stopwords might result in your models performing worse. So you should treat this preprocessing as part of your hyperparameter optimization process.
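For instance, a quick sketch of that preprocessing on the doc created earlier (drop stopwords and punctuation, keep lemmas):
filtered = [token.lemma_ for token in doc if not token.is_stop and not token.is_punct]
print(filtered)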
Pattern Matching
Another common NLP task is matching tokens or phrases within chunks of text or whole documents. You can do pattern matching with regular expressions, but spaCy's matching capabilities tend to be easier to use.
To match individual tokens, you create a Matcher. When you want to match a list of terms, it's easier and more efficient to use PhraseMatcher. For example, if you want to find where different smartphone models show up in some text, you can create patterns for the model names of interest. First you create the PhraseMatcher itself.
|
from spacy.matcher import PhraseMatcher
matcher = PhraseMatcher(nlp.vocab, attr='LOWER')
|
notebooks/nlp/raw/tut1.ipynb
|
Kaggle/learntools
|
apache-2.0
|
The matcher is created using the vocabulary of your model. Here we're using the small English model you loaded earlier. Setting attr='LOWER' will match the phrases on lowercased text. This provides case insensitive matching.
Next you create a list of terms to match in the text. The phrase matcher needs the patterns as document objects. The easiest way to get these is with a list comprehension using the nlp model.
|
terms = ['Galaxy Note', 'iPhone 11', 'iPhone XS', 'Google Pixel']
patterns = [nlp(text) for text in terms]
matcher.add("TerminologyList", patterns)
|
notebooks/nlp/raw/tut1.ipynb
|
Kaggle/learntools
|
apache-2.0
|
Then you create a document from the text to search and use the phrase matcher to find where the terms occur in the text.
|
# Borrowed from https://daringfireball.net/linked/2019/09/21/patel-11-pro
text_doc = nlp("Glowing review overall, and some really interesting side-by-side "
"photography tests pitting the iPhone 11 Pro against the "
"Galaxy Note 10 Plus and last year’s iPhone XS and Google Pixel 3.")
matches = matcher(text_doc)
print(matches)
|
notebooks/nlp/raw/tut1.ipynb
|
Kaggle/learntools
|
apache-2.0
|
The matches here are a tuple of the match id and the positions of the start and end of the phrase.
|
match_id, start, end = matches[0]
print(nlp.vocab.strings[match_id], text_doc[start:end])
|
notebooks/nlp/raw/tut1.ipynb
|
Kaggle/learntools
|
apache-2.0
|
We initialize the simulation and generate the grid
in the complex plane.
|
size = 200
iterations = 100
|
4_Cython.ipynb
|
thewtex/ieee-nss-mic-scipy-2014
|
apache-2.0
|
Pure Python
|
def mandelbrot_python(m, size, iterations):
    for i in range(size):
        for j in range(size):
            c = -2 + 3./size*j + 1j*(1.5-3./size*i)
            z = 0
            for n in range(iterations):
                if np.abs(z) <= 10:
                    z = z*z + c
                    m[i, j] = n
                else:
                    break
%%timeit -n1 -r1 m = np.zeros((size, size))
mandelbrot_python(m, size, iterations)
|
4_Cython.ipynb
|
thewtex/ieee-nss-mic-scipy-2014
|
apache-2.0
|
Cython versions
We first import Cython.
|
%load_ext cythonmagic
|
4_Cython.ipynb
|
thewtex/ieee-nss-mic-scipy-2014
|
apache-2.0
|
Take 1
First, we just add the %%cython magic.
|
%%cython -a
import numpy as np
def mandelbrot_cython(m, size, iterations):
    for i in range(size):
        for j in range(size):
            c = -2 + 3./size*j + 1j*(1.5-3./size*i)
            z = 0
            for n in range(iterations):
                if np.abs(z) <= 10:
                    z = z*z + c
                    m[i, j] = n
                else:
                    break
%%timeit -n1 -r1 m = np.zeros((size, size), dtype=np.int32)
mandelbrot_cython(m, size, iterations)
|
4_Cython.ipynb
|
thewtex/ieee-nss-mic-scipy-2014
|
apache-2.0
|
Virtually no speedup.
Take 2
Now, we add type information, using memory views for NumPy arrays.
|
%%cython -a
import numpy as np
def mandelbrot_cython(int[:,::1] m,
                      int size,
                      int iterations):
    cdef int i, j, n
    cdef complex z, c
    for i in range(size):
        for j in range(size):
            c = -2 + 3./size*j + 1j*(1.5-3./size*i)
            z = 0
            for n in range(iterations):
                if z.real**2 + z.imag**2 <= 100:
                    z = z*z + c
                    m[i, j] = n
                else:
                    break
%%timeit -n1 -r1 m = np.zeros((size, size), dtype=np.int32)
mandelbrot_cython(m, size, iterations)
|
4_Cython.ipynb
|
thewtex/ieee-nss-mic-scipy-2014
|
apache-2.0
|
Vertex client library: Custom training image classification model with custom container for online prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_image_classification_online_container.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_image_classification_online_container.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use the Vertex client library for Python to train using a custom container and deploy a custom image classification model for online prediction.
Dataset
The dataset used for this tutorial is the CIFAR10 dataset from TensorFlow Datasets. The version of the dataset you will use is built into TensorFlow. The trained model predicts which of ten classes an image belongs to: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck.
Objective
In this tutorial, you create a custom model from a Python script in a custom Docker container using the Vertex client library, and then do a prediction on the deployed model by sending data. You can alternatively create custom models using gcloud command-line tool or online using Google Cloud Console.
The steps performed include:
Create a Vertex custom job for training a model.
Train a TensorFlow model using a custom container.
Retrieve and load the model artifacts.
View the model evaluation.
Upload the model as a Vertex Model resource.
Deploy the Model resource to a serving Endpoint resource.
Make a prediction.
Undeploy the Model resource.
Costs
This tutorial uses billable components of Google Cloud (GCP):
Vertex AI
Cloud Storage
Learn about Vertex AI pricing and Cloud Storage pricing, and use the Pricing Calculator to generate a cost estimate based on your projected usage.
Installation
Install the latest version of Vertex client library.
|
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
|
notebooks/community/gapic/custom/showcase_custom_image_classification_online_container.ipynb
|
GoogleCloudPlatform/vertex-ai-samples
|
apache-2.0
|
Train a model
There are two ways you can train a custom model using a container image:
Use a Google Cloud prebuilt container. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.
Use your own custom container image. If you use your own container, the container needs to contain your code for training a custom model.
Create a Docker file
In this tutorial, you train a CIFAR10 model using your own custom container.
To use your own custom container, you build a Docker file. First, you will create a directory for the container components.
Examine the training package
Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
PKG-INFO
README.md
setup.cfg
setup.py
trainer
__init__.py
task.py
The files setup.cfg and setup.py are the instructions for installing the package into the operating environment of the Docker image.
The file trainer/task.py is the Python script for executing the custom training job. Note that when we refer to it in the worker pool specification, we replace the directory slash with a dot (trainer.task) and drop the file suffix (.py).
Package Assembly
In the following cells, you will assemble the training package.
|
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
|
notebooks/community/gapic/custom/showcase_custom_image_classification_online_container.ipynb
|
GoogleCloudPlatform/vertex-ai-samples
|
apache-2.0
|
Select seeds for search networks
I select a small (1000-1500 member) bot network and pick 4 random members from it
|
seeds = ['volya_belousova', 'egor4rgurev', 'kirillfrolovdw', 'ilyazhuchhj']
auth = tweepy.OAuthHandler(OAUTH_KEY, OAUTH_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
graph = Graph(user=NEO4J_USER, password=NEO4J_SECRET)
def get_follwers_by_id(account_id):
    ids = []
    for page in tweepy.Cursor(api.followers_ids, user_id=account_id).pages():
        print("FOLLOWERS: Next page for %s" % account_id)
        ids.extend(page)
    return ids

def get_friends_by_id(account_id):
    ids = []
    for page in tweepy.Cursor(api.friends_ids, user_id=account_id).pages():
        print("FRIENDS: Next page for %s" % account_id)
        ids.extend(page)
    return ids

def get_friends(account):
    ids = []
    for page in tweepy.Cursor(api.friends_ids, screen_name=account).pages():
        print("Next page for %s" % account)
        ids.extend(page)
    return ids

def chunks(l, n):
    for i in range(0, len(l), n):
        yield l[i:i + n]
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Now search for friends of seed users
|
friend_ids = {}
for account in seeds:
    friend_ids[account] = get_friends(account)
commons = {}
for first in seeds:
    for second in seeds:
        if first != second:
            commons[(first, second)] = list(set(friend_ids[first]) & set(friend_ids[second]))
all_users = friend_ids[seeds[0]]
for name in seeds:
    all_users = list(set(all_users) | set(friend_ids[name]))
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Show common users in total and per seed user
|
display("Common users: {0}".format(len(all_users)))
html = ["<table width=100%>"]
html.append('<tr><td></td>')
for name in seeds:
html.append('<td>{0}</td>'.format(name))
html.append('</tr>')
for first in seeds:
html.append('<tr><td>{0}</td>'.format(first))
for second in seeds:
if first != second:
html.append('<td>{0}</td>'.format(len(commons[(first,second)])))
else:
html.append('<td>x</td>')
html.append("</tr>")
html.append('</table>')
HTML(''.join(html))
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Now search and populate neo4j database
|
graph.run("CREATE CONSTRAINT ON (u:UserRes) ASSERT u.id IS UNIQUE")
processed_users = []
for user_id in all_users:
if user_id not in processed_users:
user = Node("UserRes", id=user_id)
graph.merge(user)
try:
for friend_id in get_follwers_by_id(user_id):
if friend_id in all_users:
friend = Node("UserRes", id=friend_id)
graph.merge(friend)
graph.merge(Relationship(friend, "FRIEND_OF", user))
for friend_id in get_friends_by_id(user_id):
if friend_id in all_users:
friend = Node("UserRes", id=friend_id)
graph.merge(friend)
graph.merge(Relationship(user, "FRIEND_OF", friend))
except tweepy.TweepError:
print("User {0} has protected followers/friends".format(user_id))
processed_users.append(user_id)
print(float(len(processed_users)) / float(len(all_users)) * 100.0)
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Get all users from neo4j and build graph
|
query = """
MATCH (user1:UserRes)-[:FRIEND_OF]->(user2:UserRes),
(user2:UserRes)-[:FRIEND_OF]->(user1)
RETURN user1.id, user2.id
"""
data = graph.run(query)
ig = IGraph.TupleList(data, weights=False)
ig.es["width"] = 1
ig.simplify(combine_edges={ "width": "sum" })
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Let's cluster the graph and search for communities
|
clusters = IGraph.community_fastgreedy(ig)
clusters = clusters.as_clustering()
print("Found %d clusters" % len(clusters))
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Let's build a clusters dataframe
|
nodes = [{"id": node.index, "name": node["name"]} for node in ig.vs]
for node in nodes:
    node["cluster"] = clusters.membership[node["id"]]
nodes_df = pd.DataFrame(nodes)
edges = [{"source": x[0], "target": x[1]} for x in ig.get_edgelist()]
edges_df = pd.DataFrame(edges)
edges_counts = edges_df.groupby('source').count().reset_index().rename(columns = {'target': 'count'})
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Let's look at all the clusters more closely
|
nodes_df.groupby('cluster').count()
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
We have only two clusters with a significant user count.
Let's check the first one
|
first_cluster = nodes_df[nodes_df["cluster"] == 0][["id", "name"]]
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Join edges to users
|
first_cluster_counts = first_cluster.set_index('id').join(edges_counts.set_index('source')).reset_index()
first_cluster_counts["count"].hist()
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Let's look at all the groups
|
for group in range(20):
    start = group * 100
    stop = (group + 1) * 100
    users_slice = first_cluster_counts[(first_cluster_counts["count"] > start) & (first_cluster_counts["count"] < stop)]
    print("Users from %d to %d has %d" % (start, stop, users_slice.count()[0]))
    display(users_slice[:10])
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Looks like most bot accounts have follower/following counts between 1200 and 1900.
Let's filter on that
|
filtered_bots = first_cluster_counts[(first_cluster_counts["count"] > 1200) & (first_cluster_counts["count"] < 1900)]
print("We found %s bots in first approximation" % filtered_bots.count()[0])
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Now collect all information about these accounts and search for correlations
|
first_cluster_bots = []
for group in chunks(filtered_bots["name"].values, 100):
    for user in api.lookup_users(user_ids=list(group)):
        first_cluster_bots.append(user)
locations = [user.location for user in first_cluster_bots]
first_cluster_bots[0].favourites_count
possible_bot_users = pd.DataFrame([{'name': user.name, 'id': user.id, 'location': user.location, 'screen_name': user.screen_name, 'followers': user.followers_count, 'friends': user.friends_count, 'created_at': user.created_at, 'favorites': user.favourites_count} for user in first_cluster_bots])
possible_bot_users.hist()
possible_bot_users[["id", "location"]].groupby('location').count().plot(kind='bar')
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
OK, we have two significant values: Moscow and New York. Let's split the dataset
|
moscow_users = possible_bot_users[possible_bot_users["location"] == u'Москва']
moscow_users.hist()
moscow_users[:10]
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Now check NY users
|
ny_users = possible_bot_users[possible_bot_users["location"] == u'New York, USA']
ny_users.hist()
ny_users[:10]
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Conclusion
We have one Twitter bot network in two languages: Russian and English.
All bots use deep linking and post random sentences every hour.
|
print("Moscow bots: %d, NY bots: %d, Total: %d" % (moscow_users.count()[0], ny_users.count()[0], moscow_users.count()[0] + ny_users.count()[0]))
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Now export Moscow and NY users to CSV
|
ny_users.append(moscow_users).to_csv("./moscow_ny_bots.csv", encoding='utf8')
|
Twitter bots/Botnet search.ipynb
|
UserAd/data_science
|
mit
|
Loading and visualizing the input data
|
# read the peaks
flt = columnfile('sma_261N.flt.new')
# peaks indexed to phase 1
phase1 = flt.copy()
phase1.filter( phase1.labels > -1 )
# unindexed peaks (phase 2 + unindexed phase 1?)
phase2 = flt.copy()
phase2.filter( phase2.labels == -1 )
#plot radial transform for phase 1
plt.plot( phase1.tth_per_grain, phase1.eta_per_grain, 'x')
plt.xlabel( r'$ 2 \theta \, (\degree) $' )
plt.ylabel( r'$ \eta \, (\degree) $' )
plt.title( r'$Diffraction \, angles$' )
|
sandbox/weighted_kde/3DXRD diffractogram from filtered peaks.ipynb
|
jonwright/ImageD11
|
gpl-2.0
|
Plotting the diffraction profile
|
# Probability density function (pdf) of 2theta
# weighted by the peak intensity and using default 2theta bandwidth
I_phase1 = phase1.sum_intensity * phase1.Lorentz_per_grain
pdf = wkde.gaussian_kde( phase1.tth_per_grain, weights = I_phase1)
# Plotting it over 2theta range
x = np.linspace( min(flt.tth), max(flt.tth), 500 )
y = pdf(x)
plt.plot(x, y)
plt.xlabel( r'$ 2 \theta \, (\degree) $' )
plt.ylabel( r'$ I $' )
plt.yticks([])
plt.title( ' With bandwidth = %.3f'%pdf.factor )
|
sandbox/weighted_kde/3DXRD diffractogram from filtered peaks.ipynb
|
jonwright/ImageD11
|
gpl-2.0
|
The profile shown above is highly smoothed and the hkl peaks are merged.<br>
$\to$ A smaller bandwidth should be used.
Choosing the right bandwidth of the estimator
The bandwidth can be passed as an argument to the gaussian_kde() object or set afterwards using the set_bandwidth() method. For example, the bandwidth can be reduced by a factor of 100 with respect to its previous value:
Python
gaussian_kde().set_bandwidth( gaussian_kde().factor / 100 )
|
pdf_phase1 = wkde.gaussian_kde( phase1.tth, weights = phase1.sum_intensity )
pdf_phase2 = wkde.gaussian_kde( phase2.tth, weights = phase2.sum_intensity )
frac_phase1 = np.sum( phase1.sum_intensity ) / np.sum( flt.sum_intensity )
frac_phase2 = np.sum( phase2.sum_intensity ) / np.sum( flt.sum_intensity )
from ipywidgets import interact
bw_range = ( 0.001, pdf_phase1.factor/3, 0.001)
@interact( bandwidth = bw_range)
def plot_pdf(bandwidth):
pdf_phase1.set_bandwidth(bandwidth)
pdf_phase2.set_bandwidth(bandwidth)
y_phase1 = pdf_phase1(x)
y_phase2 = pdf_phase2(x)
plt.plot( x, frac_phase1 * y_phase1, label = r'$Phase \, 1$' )
plt.plot( x, frac_phase2 * y_phase2, label = r'$Phase \, 2$' )
plt.legend(loc='best')
plt.xlabel( r'$ 2 \theta \, (\degree) $' )
plt.ylabel( r'$ I $' )
plt.yticks([])
plt.title( r'$ 3DXRD \, diffractogram $' )
|
sandbox/weighted_kde/3DXRD diffractogram from filtered peaks.ipynb
|
jonwright/ImageD11
|
gpl-2.0
|
Read some data
|
df1 = pd.read_csv('/Users/atma6951/Documents/code/pychakras/pychakras/udemy_ml_bootcamp/Python-for-Data-Visualization/Pandas Built-in Data Viz/df1', index_col=0)
df2 = pd.read_csv('/Users/atma6951/Documents/code/pychakras/pychakras/udemy_ml_bootcamp/Python-for-Data-Visualization/Pandas Built-in Data Viz/df2')
df1.head()
df2.head()
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
3 ways of calling plot from a DataFrame
df.plot() and specify the plot type, the X and Y columns etc
df.plot.hist() calling plot in OO fashion. Only specify the X and Y and color or size columns
df['column'].plot.plotname() - calling plot on a series
Types of plot that can be called: area, bar, line, scatter, box, hexbin, kde etc.
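For instance, the same bar chart can be produced in each of the three styles (a quick sketch using the df2 DataFrame read above):
Python
df2.plot(kind='bar')
df2.plot.bar()
df2['a'].plot.bar()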
Ways of plotting histogram
|
df1.plot(x='A', kind='hist')
df1['A'].plot.hist(bins=30)
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Plotting a histogram of all numeric columns in the dataframe:
|
df1.hist()
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
In reality, you have a lot more columns. You can prettify the above by creating a layout and figsize:
|
ax_list = df1.hist(bins=25, layout=(2,2), figsize=(7,7))
plt.tight_layout()
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Plotting histogram of all columns and sharing axes
The chart above might make more sense if you shared the X as well as Y axes for different columns. This helps in comparing the distribution of values visually.
|
ax_list = df1.hist(bins=25, sharex=True, sharey=True, layout=(1,4), figsize=(15,4))
ax_list = df1.hist(bins=25, sharex=True, sharey=True, layout=(2,2), figsize=(8,8))
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Backgrounds
You can specify a dark or white background and other style info to the matplotlib engine that is used behind the scenes.
Area plot
|
plt.style.use('dark_background')
df2.plot.area()
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Bar chart
Another style is fivethirtyeight
|
plt.style.use('fivethirtyeight')
df2.plot.bar()
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Line plot
This is suited for time series data
|
#reset the style
plt.style.use('default')
# pass figsize to the matplotlib backend engine and `lw` is line width
df1.plot.line(x=df1.index, y='A', figsize=(12,2), lw=1)
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Scatter plot
Use colormap or size to visualize a 3rd variable in your scatter
|
df1.plot.scatter(x='A', y='B',c='C', cmap='coolwarm')
# you could specify size s='c' however the points come out tiny.
# had to scale it by 100, hence using actual series data and not the column name
df2.plot.scatter(x='a',y='b', s=df2['c']*100)
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
KDE plots
To visualize the density of data
|
df1['A'].plot.kde()
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Visualize the density of all columns in one plot
|
df1.plot.kde()
df2.plot.density()  # density() is an alias for kde()
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Making wordclouds from text fields
Word clouds are a great way to visualize the frequency of terms that appear in the data set. This is accomplished using the wordcloud library. You can install it with
conda install -c conda-forge wordcloud
|
registrant_df = pd.read_csv('./registrant.csv')
registrant_df.head()
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Now, let us plot the responses from the column What would you like to learn? as a word cloud. First, we need to turn the series into a paragraph.
|
obj_series = registrant_df['What would you like to learn?'].dropna()
obj_list = list(obj_series)
obj_string = ' '.join(obj_list)
obj_string
from wordcloud import WordCloud
wc = WordCloud(width=1000, height=600, background_color='white')
obj_wc_img = wc.generate_from_text(obj_string)
plt.figure(figsize=(20,10))
plt.imshow(obj_wc_img, interpolation="bilinear")
plt.axis('off')
plt.title('What would you like to learn?');
|
python_crash_course/pandas_data_viz_1.ipynb
|
AtmaMani/pyChakras
|
mit
|
Create a DataFrame object
Create a DataFrame by reading a file
|
mtcars = spark.read.csv(path='../../data/mtcars.csv',
sep=',',
encoding='UTF-8',
comment=None,
header=True,
inferSchema=True)
mtcars.show(n=5, truncate=False)
|
notebooks/01-data-strcture/1.2-dataframe.ipynb
|
MingChen0919/learning-apache-spark
|
mit
|
Actuators
A multiprocessing block may need to interact asynchronously with some external device. To do so, the block puts data into a queue and uses threads responsible for interfacing between the queue and the device. This simple example illustrates the simplest actuator: a printer. Indeed printing can be done synchronously by the multiprocessing block. Printing doesn't need a queue to interface between it and the block. We use the printer in this example to illustrate the idea.
<br>
<br>
Function <i>g</i> of process <i>p1</i> has an agent called 'copy_stream_s_to_queue_q' which copies stream <i>s</i> to queue <i>q</i>. A thread, <b>my_thread</b>, in <i>p1</i> prints values from the queue; this thread represents the thread that interfaces with an external actuator device. This thread is in addition to any source threads that may exist.
<br>
<br>
Queue <i>q</i> is specified as an <b>output queue</b>. An output queue gets a special message <b>'_finished'</b> when the multiprocess block terminates.
<br>
<br>
Threads (apart from source threads) and output queues are specified in <i>multicore_specifications</i>. See
<br>
<br>
{'name': 'p1', 'agent': g, 'inputs': ['y'],
<br>
'args': [q], <b>'output_queues'</b>: [q], <b>'threads'</b>: [my_thread]}
<br>
<br>
The thread, <i>my_thread</i>, terminates when it receives a '_finished' message. We want this thread to terminate so that process <i>p1</i> terminates, and then the entire multiprocessing block can terminate as well.
|
import threading
from IoTPy.agent_types.sink import stream_to_queue
def f(in_streams, out_streams):
map_element(lambda v: v+100, in_streams[0], out_streams[0])
def source_thread_target(procs):
for i in range(3):
extend_stream(procs, data=list(range(i*2, (i+1)*2)), stream_name='x')
time.sleep(0.001)
terminate_stream(procs, stream_name='x')
def example_output_thread_with_queue():
q = multiprocessing.Queue()
def g(in_streams, out_streams, q):
s = Stream('s')
map_element(lambda v: v*2, in_streams[0], s)
stream_to_queue(s, q, name='copy_stream_s_to_queue_q')
def get_data_from_output_queue(q):
while True:
v = q.get()
if v == '_finished': break
else: print ('q.get() = ', v)
multicore_specification = [
# Streams
[('x', 'i'), ('y', 'i')],
# Processes
[{'name': 'p0', 'agent': f, 'inputs':['x'], 'outputs': ['y'], 'sources': ['x']},
{'name': 'p1', 'agent': g, 'inputs': ['y'],
'args': [q], 'output_queues': [q]}]
]
processes, procs = get_processes_and_procs(multicore_specification)
source_thread = threading.Thread(target=source_thread_target, args=(procs,))
output_thread = threading.Thread(target=get_data_from_output_queue, args=(q,))
procs['p0'].threads = [source_thread, output_thread]
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
example_output_thread_with_queue()
|
examples/ExamplesOfMulticorePartTwo.ipynb
|
AssembleSoftware/IoTPy
|
bsd-3-clause
|
Example of Process Structure with Feedback
The example shows a process structure with feedback. This example creates an echo from a spoken sound. (You can write more efficient and succinct code to compute echoes. The code in this example is here merely because it illustrates a concept.)
<br>
Streams
<ol>
<li><b>sound_made</b>: This is the sound made by a speaker in a large spherical space.</li>
<li><b>attenuated</b>: This is the sound made multiplied by an attenuation factor.</li>
<li><b>echo</b>: This is the echo of the sound made heard at the center of the room. The echo is a delay followed by an attenuation of the sound heard. </li>
<li><b>sound_heard</b>: This is the sound that is heard by the speaker. The heard sound is the sound made by the speaker plus the echo.</li>
</ol>
The equations that define the streams are:
<ol>
<li>
<b>attenuated[n] = sound_heard[n]*attenuation</b>
</li>
<li>
<b>echo[n] = attenuated[n-delay]</b> for n >= delay, and <b>echo[n] = 0</b> otherwise.
</li>
<li>
<b>sound_heard[n] = sound_made[n] + echo[n]</b>
</li>
</ol>
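As a quick sanity check of these recurrences outside the IoTPy framework, here is a minimal plain-numpy sketch (the delay of 4 and the attenuation of 0.5 match the example below):
Python
import numpy as np

def echo_recurrence(sound_made, delay=4, attenuation=0.5):
    # sound_heard[n] = sound_made[n] + echo[n], where
    # echo[n] = attenuation * sound_heard[n - delay], and 0 for n < delay.
    sound_heard = np.zeros(len(sound_made))
    for n in range(len(sound_made)):
        echo_n = attenuation * sound_heard[n - delay] if n >= delay else 0.0
        sound_heard[n] = sound_made[n] + echo_n
    return sound_heard

print(echo_recurrence(np.arange(10, dtype='float')))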
Process Structure
Process <i>p0</i> has a source which feeds one of its input streams <i>sound_made</i> with a stream of measurements obtained from a microphone. In this example, the stream is generated with numbers so that we can see how streams are processed.
<br>
<br>
Process <i>p1</i> contains a single input stream which is the sound heard and a single output stream which is an attenuation of the sound heard.
Process Functions
The function <i>f</i> of <i>p0</i> computes <i>echo</i> from <i>sound_made</i>. The first 4 (i.e., <b>delay</b>) units of the echo are zero.
<br>
<b>map_element(lambda v: v, attenuated, echo)</b>
<br>
copies the attenuated stream to the echo stream; however, since the first 4 (i.e. delay) values of the echo stream are 0, the echo stream will consist of 4 zeroes followed by the attenuated stream.
<br>
<i>out_streams[0]</i> of process <i>p0</i> is <i>sound_heard</i>. Function <i>f</i> makes <i>sound_heard</i> the sum of the echo and the sound made.
<br>
The function <i>g</i> of process <i>p1</i> puts elements of its input stream (i.e. <i>sound_heard</i>) on queue <i>q</i> and returns the elements multiplied by <i>attenuation</i>.
|
from IoTPy.agent_types.basics import *
def example_echo_two_cores():
# This is the delay from when the made sound hits a
# reflecting surface.
delay = 4
# This is the attenuation of the reflected wave.
attenuation = 0.5
# The results are put in this queue. A thread reads this
# queue and feeds a speaker or headphone.
q = multiprocessing.Queue()
# Agent function for process named 'p0'
# echo is a delay of zeroes followed by attenuated heard sound.
# out_streams[0], which is the same as sound_heard is
# echo + sound_made
def f_echo(in_streams, out_streams, delay):
sound_made, attenuated = in_streams
echo = StreamArray('echo', dtype='float')
echo.extend(np.zeros(delay, dtype='float'))
map_element(lambda v: v, attenuated, echo)
        # The zip_map output is the sound heard, which is
        # the sound made plus the echo.
zip_map(sum, [sound_made, echo], out_streams[0])
# Agent function for process named 'p1'
# This process puts the sound heard into the output queue
# and returns an attenuated version of the sound_heard as
# its output stream.
def g_echo(in_streams, out_streams, attenuation, q):
def gg(v):
# v is the sound heard
q.put(v)
# v*attenuation is the echo
return v*attenuation
map_element(gg, in_streams[0], out_streams[0])
def source_thread_target(procs):
data=np.arange(10, dtype='float')
extend_stream(procs, data, stream_name='sound_made')
time.sleep(0.0001)
extend_stream(procs, data=np.zeros(10, dtype='float'), stream_name='sound_made')
terminate_stream(procs, stream_name='sound_made')
# Thread that gets data from the output queue
# This thread is included in 'threads' in the specification.
# Thread target
def get_data_from_output_queue(q):
finished_getting_output = False
while not finished_getting_output:
v = q.get()
if v == '_finished': break
print ('heard sound = spoken + echo: ', v)
multicore_specification = [
# Streams
[('sound_made', 'f'), ('attenuated', 'f'), ('sound_heard', 'f')],
# Processes
[{'name': 'p0', 'agent': f_echo, 'inputs': ['sound_made', 'attenuated'],
'outputs': ['sound_heard'], 'keyword_args' : {'delay' : delay}, 'sources': ['sound_made']},
{'name': 'p1', 'agent': g_echo, 'inputs': ['sound_heard'], 'outputs': ['attenuated'],
'args': [attenuation, q], 'output_queues': [q] } ]]
processes, procs = get_processes_and_procs(multicore_specification)
source_thread = threading.Thread(target=source_thread_target, args=(procs,))
output_thread = threading.Thread(target=get_data_from_output_queue, args=(q,))
procs['p0'].threads = [source_thread, output_thread]
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
example_echo_two_cores()
|
examples/ExamplesOfMulticorePartTwo.ipynb
|
AssembleSoftware/IoTPy
|
bsd-3-clause
|
Example source and actuator thread with single process
This example is the same as the previous one except that the computation is carried out in a single process rather than in two processes. The example illustrates an actuator thread and a source thread in the same process.
|
def example_echo_single_core():
# This is the delay from when the made sound hits a
# reflecting surface.
delay = 4
# This is the attenuation of the reflected wave.
attenuation = 0.5
# The results are put in this queue. A thread reads this
# queue and feeds a speaker or headphone.
q = multiprocessing.Queue()
# Agent function for process named 'p0'
# echo is a delay of zeroes followed by attenuated heard sound.
# out_streams[0], which is the same as sound_heard is
# echo + sound_made
def f_echo(in_streams, out_streams, delay, attenuation, q):
echo = StreamArray(
'echo', initial_value=np.array([0.0]*delay, dtype='float'), dtype='float')
#Note: sound_made = in_streams[0]
sound_heard = in_streams[0] + echo
map_element(lambda v: v*attenuation, sound_heard, echo)
stream_to_queue(sound_heard, q)
def source_thread_target(procs):
extend_stream(procs, data=np.arange(10, dtype='float'), stream_name='sound_made')
time.sleep(0.0001)
extend_stream(procs=procs, data=np.zeros(10, dtype='float'), stream_name='sound_made')
terminate_stream(procs, stream_name='sound_made')
# Thread that gets data from the output queue
# This thread is included in 'threads' in the specification.
# Thread target
def get_data_from_output_queue(q):
finished_getting_output = False
while not finished_getting_output:
v = q.get()
if v == '_finished': break
print ('heard sound = spoken + echo: ', v)
multicore_specification = [
# Streams
[('sound_made', 'f')],
# Processes
[{'name': 'p0', 'agent': f_echo, 'inputs': ['sound_made'],
'args' : [delay, attenuation, q], 'sources': ['sound_made'],'output_queues': [q]}]]
processes, procs = get_processes_and_procs(multicore_specification)
source_thread = threading.Thread(target=source_thread_target, args=(procs,))
output_thread = threading.Thread(target=get_data_from_output_queue, args=(q,))
procs['p0'].threads = [source_thread, output_thread]
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
example_echo_single_core()
|
examples/ExamplesOfMulticorePartTwo.ipynb
|
AssembleSoftware/IoTPy
|
bsd-3-clause
|
Example of a grid computation
Grid computations are used in science, for example in computing the temperature of a metal plate. The grid is partitioned into regions with a process assigned to simulate each region. On the n-th step, each process reads the values of relevant parts of the grid and updates its own value.
<br>
<br>
This example uses two copies of the grid; the two copies are <b>even</b> and <b>odd</b>. On even steps (i.e., steps 0, 2, 4,..) each process reads the <i>even</i> grid and updates its portion of the <i>odd</i> grid. So, each portion of the grid is modified by only one process. And no process is reading values while the values are being modified.
<br>
By symmetry, on odd steps, each process reads the <i>odd</i> grid and updates its portion of the <i>even</i> grid.
<br>
The example problem
A linear metal bar of length <i>N</i> is partitioned into a grid of <i>N</i> contiguous regions. Grid 0 is kept at a constant temperature of 0 degrees while grid <i>N-1</i> is kept at a constant temperature of <i>N-1</i> degrees. Initially, the temperature at intermediate grid points is arbitrary; in the code below, the temperature at grid point <i>i</i> exceeds <i>i</i> by <i>DELTA</i>.
<br>
Let <b>TEMP[i][k]</b> be the temperature of the <i>i</i>-th region on step <i>k</i>. Then:
<ol>
<li>TEMP[0][k] = 0 </li>
<li>TEMP[N-1][k] = N-1 </li>
<li>TEMP[i][k+1] = (TEMP[i-1][k] + TEMP[i][k] + TEMP[i+1][k])/3 for i in [1, .., N-2] </li>
</ol>
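A sequential sketch of this even/odd scheme (using N = 5, M = 5 and DELTA = 0.01, as in the code below) makes the update rule concrete:
Python
import numpy as np

N, M, DELTA = 5, 5, 0.01
even = np.array([0.0] + [i + DELTA for i in range(1, N-1)] + [float(N-1)])
odd = even.copy()
for k in range(M):
    src, dst = (even, odd) if k % 2 == 0 else (odd, even)
    for i in range(1, N-1):              # boundary cells stay fixed
        dst[i] = (src[i-1] + src[i] + src[i+1]) / 3.0
print(odd if M % 2 == 1 else even)       # the copy written on the last step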
Processes
The computation uses <i>N-2</i> processes. The <i>i</i>-th process is called 'grid_i' and is responsible for simulating the <i>i</i>-th region.
<br>
Each process takes the <i>k + 1</i>-th step after it has finished the <i>k</i>-th step and it has determined that its neighbors have also finished the <i>k</i>-th step.
<br>
Streams
The system has one stream, <b>s_i</b> for the <i>i</i>-th process. This stream contains the elements [0, 1, .. , k] after the <i>i</i>-th process has completed <i>k</i>-th steps.
<br>
Process <i>grid_i</i> outputs stream <i>s_i</i> and inputs streams from its neighboring processes which are <i>grid_(i-1)</i> if <i>i</i> exceeds 1 and <i>grid_(i+1)</i> if <i>i</i> is less than <i>N-1</i>.
Process Structure
The process structure is linear with each process getting input streams from each of its neighbors and sending its output stream to all its neighbors.
Process Function
The process begins by sending 0 on its output stream to indicate that it has finished its 0-th step.
<br>
<br>
The <i>k</i>-th value of <i>in_streams[j]</i> is <i>k</i> when the <i>j</i>-th neighboring process has completed its <i>k</i>-th step.
<br>
<br>
<b>synch_stream</b> is an internal stream of the process. The <i>k</i>-th element of this stream is <i>k</i> after all neighboring processes have completed their <i>k</i>-th step.
<br>
<br>
The zip_map function <i>r</i> operates on a list with one element from each neighbor. All the elements of the list will be <i>k</i> on the <i>k</i>-th step. The zip_map function returns <i>k</i> which is any element of the list. In this example it returns the 0-th element.
<br>
<br>
Thus the zip_map function acts as a synchronizer. It waits until all neighbors have completed the <i>k</i>-th step and then outputs <i>k</i>.
<br>
<br>
Function <i>g</i> is called for the <i>k</i>-th time when this process and all its neighbors have completed <i>k - 1</i> steps. Function <i>g</i> does the grid computation. Function <i>r</i> and the zip_map agent are used merely for synchronizing.
run()
Function <i>f</i> calls <b>run</b> after it has declared all its agents. Without calling run() the function will take no action.
<br>
<br>
Note that when using external source threads, you should not call <i>run</i> because the source threads are responsible for starting and stopping the main computational thread. This example has no source threads so you must call <i>run</i> to start the system.
|
from IoTPy.core.stream import _no_value
def test_grid():
# N is the size of the grid
N = 5
# M is the number of steps of execution.
M = 5
# DELTA is the deviation from the final solution.
DELTA = 0.01
# even, odd are the grids that will be returned
# by this computation
even = multiprocessing.Array('f', N)
odd = multiprocessing.Array('f', N)
# Set up initial values of the grid.
for i in range(1, N-1):
even[i] = i + DELTA
even[N-1] = N-1
odd[N-1] = N-1
def f(in_streams, out_streams, index, even, odd):
def g(v):
if (0 < index) and (index < N-1):
if v%2 == 0:
odd[index] = (even[index-1] + even[index] + even[index+1])/3.0
else:
even[index] = (odd[index-1] + odd[index] + odd[index+1])/3.0
return v+1
def r(lst, state):
if state < M:
return lst[0], state+1
else:
return _no_value, state
for out_stream in out_streams: out_stream.extend([0])
synch_stream = Stream('synch_stream')
zip_map(r, in_streams, synch_stream, state=0, name='zip_map_'+str(index))
map_element(g, synch_stream, out_streams[0], name='grid'+str(index))
run()
multicore_specification = [
# Streams
[('s_'+str(index), 'i') for index in range(1, N-1)],
# Processes
[{'name': 'grid_'+str(index), 'agent': f,
'inputs':['s_'+str(index+1), 's_'+str(index-1)],
'outputs':['s_'+str(index)],
'args': [index, even, odd]} for index in range(2, N-2)] + \
[{'name': 'grid_'+str(1), 'agent': f,
'inputs':['s_'+str(2)], 'outputs':['s_'+str(1)],
'args': [1, even, odd]}] + \
[{'name': 'grid_'+str(N-2), 'agent': f,
'inputs':['s_'+str(N-3)], 'outputs':['s_'+str(N-2)],
'args': [N-2, even, odd]}]
]
# Execute processes (after including your own non IoTPy processes)
processes = get_processes(multicore_specification)
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
print ('Grid after ', M, ' steps is: ')
if M%2 == 0:
print (even[:])
else:
print (odd[:])
test_grid()
|
examples/ExamplesOfMulticorePartTwo.ipynb
|
AssembleSoftware/IoTPy
|
bsd-3-clause
|
Let us evaluate a function of 3 variables on a relatively large mesh
|
T = 1.618033988749895
from numpy import sin,cos,pi
r = 4.77
zmin,zmax = -r,r
xmin,xmax = -r,r
ymin,ymax = -r,r
Nx,Ny,Nz = 80,80,80
x = np.linspace(xmin,xmax,Nx)
y = np.linspace(ymin,ymax,Ny)
z = np.linspace(zmin,zmax,Nz)
x,y,z = np.meshgrid(x,y,z,indexing='ij')
%time p = 2 - (cos(x + T*y) + cos(x - T*y) + cos(y + T*z) \
+ cos(y - T*z) + cos(z - T*x) + cos(z + T*x))
p3d_1 = k3d.marching_cubes(p,xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax, zmin=zmin, zmax=zmax,\
level=0.0)
plot += p3d_1
plot.display()
p3d_1.color = 0x114455
|
examples/SageDays74/implicit_plot3d_interactive.ipynb
|
K3D-tools/K3D-jupyter
|
mit
|
The isolevel can be changed from the Python side:
|
p3d_1.level=-0.1
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
@interact(l=widgets.FloatSlider(value=-.1,min=-3,max=1.1))
def g(l):
p3d_1.level=-l
|
examples/SageDays74/implicit_plot3d_interactive.ipynb
|
K3D-tools/K3D-jupyter
|
mit
|
To avoid recentering, one can disable camera auto fit:
|
plot.camera_auto_fit = False
plot.grid_auto_fit = False
|
examples/SageDays74/implicit_plot3d_interactive.ipynb
|
K3D-tools/K3D-jupyter
|
mit
|
One can add other plots to the same scene:
|
%%time
p =(x**2+y**2+z**2+2*y-1)*((x**2+y**2+z**2-2*y-1)**2-8*z**2)+16*x*z*(x**2+y**2+z**2-2*y-1)
plot += k3d.marching_cubes(p,xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax, zmin=zmin, zmax=zmax, level=0.0,color=0xff0000)
%%time
p = x**2 + y**2 - z**2 -0.
plot += k3d.marching_cubes(p,xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax, zmin=zmin, zmax=zmax, level=0.0,color=0x00ff00)
|
examples/SageDays74/implicit_plot3d_interactive.ipynb
|
K3D-tools/K3D-jupyter
|
mit
|
Config
|
import nlpaug.augmenter.char as nac
import nlpaug.augmenter.word as naw
import nlpaug.augmenter.sentence as nas
import nlpaug.flow as naf
from nlpaug.util import Action
text = 'The quick brown fox jumps over the lazy dog .'
print(text)
|
example/flow.ipynb
|
makcedward/nlpaug
|
mit
|
Flow <a class="anchor" id="flow">
To make use of multiple augmenters, Sequential and Sometimes pipelines are introduced to connect them.
Sequential Pipeline<a class="anchor" id="seq_pipeline">
Apply different augmenters sequentially
|
aug = naf.Sequential([
nac.RandomCharAug(action="insert"),
naw.RandomWordAug()
])
aug.augment(text)
|
example/flow.ipynb
|
makcedward/nlpaug
|
mit
|
Generate multiple synthetic examples
|
aug = naf.Sequential([
nac.RandomCharAug(action="insert"),
naw.RandomWordAug()
])
aug.augment(text, n=3)
|
example/flow.ipynb
|
makcedward/nlpaug
|
mit
|
Sometimes Pipeline<a class="anchor" id="sometimes_pipeline">
Apply some augmenters randomly
|
aug = naf.Sometimes([
nac.RandomCharAug(action="delete"),
nac.RandomCharAug(action="insert"),
naw.RandomWordAug()
])
aug.augment(text)
|
example/flow.ipynb
|
makcedward/nlpaug
|
mit
|
$$
v(s) = \min_x \left( \mathrm{cost}(x,s) + v(\mathrm{newstate}(x,s)) \right)
$$
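As a minimal illustration of this recursion (a toy sketch, independent of the class below): drive a state s to 0 within T steps, where each move x in {-1, 0, 1} costs |x| and failing to reach 0 costs infinity.
Python
T = 5
cache = {}

def v(t, s):
    # v(t, s): cheapest way to reach s == 0 by step T, memoised in cache.
    if (t, s) in cache:
        return cache[(t, s)]
    if t == T:
        result = 0 if s == 0 else float('inf')
    else:
        result = min(abs(x) + v(t + 1, s + x) for x in (-1, 0, 1))
    cache[(t, s)] = result
    return result

print(v(0, 3))  # 3: three unit moves are needed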
|
class DynamicProgram(object):
"""
    Generate a dynamic program to find a set of optimal decisions using the HJB.
    Define the program by:
    Setting initial states via: set_inital_state(list or int)
    Setting the number of steps via: set_step_number(int)
    Add a set of decisions: add_decisions_set(set)
Add a cost function: add_cost_function(function in terms of state )
Add a state change equation: add_state_eq(function(state))
Add an expression for the last value: add_final_value_expression(function(state,settings))
Add limits on the states: add_state_limits(lower=list or int,upper = list or int)
See below for examples:
"""
def __init__(self):
self.settings = {
'Lower state limits' : [],
'Upper state limits' : [],
'x_set' : set(),
'cache' : {},}
def add_state_eq(self,function):
"""Returns a tuple describing the states.
        Remember to increment the first state, by convention the number of steps.
        Load additional parameters (usually needed for cost and state value) with global variables.
        Example of a function that changes the state by the decision:
def new_state(x,s):
return (s[0]+1,s[1]+x) #Return a tuple, use (s[:-1]+(5,)) to slice tuples.
"""
self.settings['State eq.'] = function
def add_cost_function(self,function):
"""
Returns a float or integer describing the cost value.
Load additional parameters (usually needed for cost and state value with global variables)
Example is a function that simply returns the decision as cost:
def cost(x,s):
return x
"""
self.settings['Cost eq.'] = function
def add_final_value_expression(self, function,):
"""
Returns a float or integer as the final value:
Example is a function that returns the ratio of the initial state and the final state:
def val_T(s,settings):
return s[1]/float(settings['Initial state'][1])
"""
self.settings['Final value'] = function
def set_step_number(self,step_number):
"""Number of stages / steps. Integer"""
self.settings['T'] = step_number
def set_inital_state(self,intial_values):
"""Provide the inital state of the states other than the stage number"""
if type(intial_values) is list:
self.settings['Initial state'] = intial_values
self.settings['Initial state'].insert(0,0)
elif type(intial_values) is int:
self.settings['Initial state'] = [intial_values]
self.settings['Initial state'].insert(0,0)
self.settings['Initial state'] = tuple(self.settings['Initial state'])
def add_state_limits(self,lower=[],upper=[]):
"""Add the limits on the state other than the stage number, leave empty if none"""
if type(lower) is list:
self.settings['Lower state limits'].extend(lower)
self.settings['Upper state limits'].extend(upper)
elif type(lower) is int:
self.settings['Lower state limits'] = [lower]
self.settings['Upper state limits'] = [upper]
def solve(self):
"""
Solves the HJB. Returns the optimal value.
Path and further info is stored in the cache. Access it via
retrieve_decisions()
"""
self.settings['cache'] ={}
return self._hjb_(self.settings['Initial state'])
def retrieve_decisions(self):
"""
Retrieve the decisions that led to the optimal value
Returns the cost for the different states, the optimal schedule and the states
that the schedule results in.
"""
sched = np.ones(self.settings['T'])*np.nan
cost_calc= 0
states = []
s = self.settings['Initial state']
t = 0
while t < self.settings['T']:
sched[t] = self.settings['cache'][s][1]
cost_calc += self.settings['Cost eq.'](sched[t],s)
states.append(s[1:])
s = self.settings['State eq.'](sched[t],s)
t += 1
states.append(s[1:])
return cost_calc, sched, states
def return_settings(self):
return self.settings
def return_cache(self):
return self.settings['cache']
def add_decisions_set(self,set_of_decisions):
"""
        Add a set of permissible decisions. Must be a set of unique integers.
"""
if set(set_of_decisions) != set_of_decisions:
            raise TypeError('Expected a set of unique values; use set() to declare a set')
self.settings['x_set'] = set(set_of_decisions)
def add_setting(self,key,value):
self.settings[key] = value
def _hjb_(self,s):
        if s in self.settings['cache']:
return self.settings['cache'][s][0]
# check state bounds
for c,i in enumerate(s[1:]):
if i < self.settings['Lower state limits'][c] or i > self.settings['Upper state limits'][c]:
return float('inf')
#Check if reached time step limit:
if s[0] == self.settings['T']:
m = self.settings['Final value'](s,self.settings)
self.settings['cache'][s] = [m, np.nan]
return m
# Else enter recursion
else:
### Make decision variable vector ###
######################################################################################
# Faster but only with integer decisions
p=[]
for x in self.settings['x_set']:
p.append(self.settings['Cost eq.'](x,s)+self._hjb_(self.settings['State eq.'](x,s)))
m = min(p)
            ### Slower but with any immutable decisions, uncomment if desired: ###
# p ={}
# for x in self.settings['x_set']:
# p[x] = self.settings['Cost eq.'](x,s)+self._hjb_(self.settings['State eq.'](x,s))
# m = min(p, key=p.get)
########################################################################################
##############################################################
            ## Finding the decision that attains the best solution ##
            # p is indexed by position, so iterate the set in the same order.
            for idx, x in enumerate(self.settings['x_set']):
                if m == p[idx]:
                    pp = x
################################################################
self.settings['cache'][s] = [m, pp]
return m
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|
We can also solve classic dynamic programming problems such as the knapsack problem, the Towers of Hanoi, or the Fibonacci number calculation. Blank functions are outlined below.
The functions must fulfill a range of conditions:
$$
f:\mathcal{S}^n\times \mathcal{X} \rightarrow \mathbb{R}
$$
where $\mathcal{S}$ is the set of permissible states, $x$ the decision variable and $n$ the number of dimensions of the state. These are defined by $x \in \mathcal{X}$, the `set()` of permissible decisions. The state $s \in \mathcal{S}^n$ where $n \geq 2$ and $\mathcal{S} \subset \mathbb{R}$ is finite.
The new state is defined by a function such that:
$$
f:\mathcal{S}^n\times \mathcal{X} \rightarrow \mathcal{S}^n
$$
The value of the final stage is defined as:
$$
f:\mathcal{S}^n \rightarrow \mathbb{R}
$$
which, as an implementation detail, has the required argument settings; features can be added through add_setting(key, value), where key must be a new and unique dictionary key and value may be any permissible dictionary entry.
|
#help(DynamicProgram)
def cost(x,s):
"""Return a float or integer"""
pass
def new_state(x,s):
"""Return a tuple"""
pass
def val_T(s,settings):
"""Return a float or int"""
pass
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|
We can solve a very simple pump optimisation where the state of water in a tank is given by h and described by:
$$
s_{new} = \begin{cases} (t+1,h-1) & \text{if } x = 0 \\ (t+1,h+1) & \text{if } x = 1 \\ (t+1,h+1.5) & \text{if } x = 2\end{cases}
$$
The operating costs are described by:
$$
cost = tariff(t)\times x
$$
where $x$ is the decision variable.
The final value is given by:
$$
V_T = \begin{cases} 0 & \text{if } h_T \geq h_0 \\ \infty & \text{otherwise} \end{cases}
$$
|
def simple_cost(x,s):
tariff = [19, 8, 20, 3, 12, 14, 0, 4, 3, 13, 11, 13, 13, 11, 16, 14, 16,
19, 1, 8, 0, 4, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
12, 3, 18, 15, 3, 10, 12, 6, 3, 5, 11, 0, 11, 8, 10, 11, 5,
15, 8, 2, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
9, 10, 13, 7, 7, 1, 12, 2, 2, 1, 5, 8, 4, 0, 11, 2, 5,
16, 8, 1, 17, 16, 3, 0, 4, 16, 0, 7]
return tariff[s[0]]*x
def val_T(s,settings):
if s[1] < settings['Initial state'][1]:
return float('inf')
else:
return 0
def simple_state(x,s):
#print s
if x == 0:
return (s[0]+1,s[1]-1)
elif x == 1:
return (s[0]+1,s[1]+1)
elif x == 2:
return (s[0]+1,s[1]+1.5)
pumping = DynamicProgram()
pumping.set_step_number(96)
pumping.add_decisions_set({0,1,2})
pumping.add_cost_function(simple_cost)
pumping.add_state_eq(simple_state)
pumping.add_final_value_expression(val_T)
pumping.add_state_limits(lower=0,upper = 200)
pumping.set_inital_state(100)
pumping.return_settings()
#pumping.retrieve_decisions()
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|
We can have more than one state variable. For example, we can add a second tank and now pump to either of them.
Here they have similar equations, but they can be completely independent of each other. Any cost and state function that meets the requirements above is allowed.
$$
s_{new} = \begin{cases} (t+1,h_1-1,h_2-1) & \text{if } x = 0 \\ (t+1,h_1+1,h_2) & \text{if } x = 1 \\ (t+1,h_1,h_2+2) & \text{if } x = 2\end{cases}
$$
|
def simple_state2(x,s):
if x == 0:
return (s[0]+1,s[1]-1,s[2]-1)
elif x == 1:
return (s[0]+1,s[1]+1,s[2])
elif x == 2:
return (s[0]+1,s[1] ,s[2]+2)
# We also need to update the final value function.
def val_T2(s,settings):
if s[1] < settings['Initial state'][1] or s[2] < settings['Initial state'][2]:
return float('inf')
else:
return 0
# For now we leave the cost function the same, but that could also be changed.
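# Hypothetical wiring of the two-tank problem into DynamicProgram (a sketch;
# assumption: both tanks start at 100 and reuse the single-tank limits).
pumping2 = DynamicProgram()
pumping2.set_step_number(96)
pumping2.add_decisions_set({0,1,2})
pumping2.add_cost_function(simple_cost)
pumping2.add_state_eq(simple_state2)
pumping2.add_final_value_expression(val_T2)
pumping2.add_state_limits(lower=[0,0], upper=[200,200])
pumping2.set_inital_state([100,100])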
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|
One final example is the checkerboard problem as outlined here: https://en.wikipedia.org/wiki/Dynamic_programming#Checkerboard
|
board = np.array([[1,3,4,6],
[2,6,2,1],
[7,3,2,1],
[0,4,2,9]])
def cost(x,s):
"""Return a float or integer"""
global board
return board[s[0],s[1]]
def new_state(x,s):
"""Return a tuple"""
global board
return (int(s[0]+1),int(s[1]+x))
def val_T(s,settings):
"""Return a float or int"""
global board
return board[s[0],s[1]]
Checkerboard = DynamicProgram()
Checkerboard.add_cost_function(cost)
Checkerboard.add_decisions_set({-1,0,1})
Checkerboard.add_final_value_expression(val_T)
Checkerboard.add_state_eq(new_state)
Checkerboard.add_state_limits(lower=-1,upper=3)
Checkerboard.set_inital_state(2)
Checkerboard.set_step_number(3)
Checkerboard.solve()
print(Checkerboard.retrieve_decisions())
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|
An example we can solve is the water allocation problem from the tutorial sheets:
Consider a water supply allocation problem. Suppose that a quantity Q can be
allocated to three water users (indices j=1, 2 and 3); what allocation
maximises the total net benefits?
The gross benefit resulting from the allocation of $x_j$ to j is:
$$
a_j(1-e^{-b_j x_j})
$$
with $a_j, b_j > 0$.
Moreover, the costs of this allocation are:
$c_j x_j^{d_j}$ (with $c_j, d_j > 0$ and $d_j < 1$, because of economies of scale).
The values of the constants are:
Q=5
| | $a_j$|$b_j$|$c_j$|$d_j$|
|---|---|---|---|---|
|j=1 |100|0.1| 10|0.6|
|j=2 | 50|0.4| 10|0.8|
|j=3 |100|0.2| 25|0.4|
Solve the problem in the discrete case (i.e. assuming that $x_j$ is an integer).
The optimisation problem is therefore:
$$
\text{Maximise } \sum_j \left( a_j(1-e^{-b_j x_j}) - c_j x_j^{d_j} \right)
$$
$$
\text{subject to: } \sum_j x_j \leq Q
$$
$$
x_j \geq 0
$$
This problem is a non-linear mixed-integer problem and quite expensive to solve by branch and bound. But with DP it is much easier. (The continuous case may be much easier as an NLP than as a DP; this shows how important it is to pay attention to the problem type.)
The decisions are 0 ... 5.
The states are the stage (user index) and the amount of water still available.
|
import math
Costs = np.array([[100,0.1, 10,0.6],
                  [50, 0.4, 10,0.8],
                  [100,0.2, 25,0.4]])
def value(x,s):
    global Costs
    return Costs[s[0],0]*(1-math.exp(-Costs[s[0],1]*x))
def cost(x,s):
    """Return a float or integer: allocation cost minus gross benefit."""
    global Costs
    return -value(x,s) + Costs[s[0],2]*x**Costs[s[0],3]
def new_state(x,s):
    """Return a tuple: advance to the next user and reduce the available water."""
    return (s[0]+1, s[1]-x)
def val_T(s,settings):
    """Return a float or int. Assumption: leftover water has no terminal value."""
    return 0
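# Hypothetical wiring into DynamicProgram (a sketch; stage s[0] is the user
# index j, s[1] the water still available, and Q = 5 as stated above).
water = DynamicProgram()
water.set_step_number(3)                   # one allocation decision per user
water.add_decisions_set({0,1,2,3,4,5})     # integer allocations 0 ... 5
water.add_cost_function(cost)              # minimising -net benefit maximises benefit
water.add_state_eq(new_state)
water.add_final_value_expression(val_T)
water.add_state_limits(lower=0, upper=5)   # remaining water stays in [0, Q]
water.set_inital_state(5)                  # Q = 5
print(water.solve())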
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|
Stochastic Programming
$$
v(s,i) = \min_x \left( \mathrm{cost}(x,s,i) + \sum_j p_{i,j} \, v(\mathrm{newstate}(x,s,j)) \right)
$$
where the probability $p_{i,j}$ is the probability of jumping from stochastic state $i$ to state $j$. Currently the transition matrix is time-invariant; a time-varying chain could, however, easily be implemented by making P a list of transition matrices.
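For intuition, the expectation term alone can be sketched in a few lines (the transition matrix matches the pump example further below; the successor values are hypothetical):
Python
import numpy as np

P = np.array([[0.4, 0.5, 0.1],
              [0.2, 0.6, 0.2],
              [0.1, 0.5, 0.4]])
v_next = np.array([10.0, 12.0, 8.0])   # hypothetical values of the successor states
i = 1                                  # current stochastic state
expected_future = sum(P[i, j] * v_next[j] for j in range(len(P)))
print(expected_future)                 # 0.2*10 + 0.6*12 + 0.2*8 = 10.8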
|
class StochasticProgram(DynamicProgram):
"""
Adds a stochastic component to the dynamic program.
    The state now is s, where s[0] is the step, s[1:-1] are the states of the system, and s[-1] is the stochastic state.
    The transition matrix for the Markov chain describing the stochastic behavior is added by:
add_transition_matrix(P) with P as a list of lists.
"""
def add_transition_matrix(self,P):
"""
Add the transition matrix as list of lists
eg. P = [[0.4,0.5,0.1],
[0.2,0.6,0.2],
[0.1,0.5,0.4]]
"""
self.settings['P'] = np.array(P)
self.settings['Len P'] = len(P)
def retrieve_decisions(self):
"""
Retrieve the decisions that led to the optimal value
Returns the cost for the different states, the optimal schedule and the states that the schedule results in.
"""
schedule = []
cost_calc= np.zeros(self.settings['Len P'])
states = []
for i in range(self.settings['Len P']):
schedule_part = []
states_part = []
s = self.settings['Initial state']
t = 0
while t < self.settings['T']:
schedule_part.append(self.settings['cache'][s][1])
cost_calc[i] += self.settings['Cost eq.'](schedule_part[t],s)
states_part.append(s[1:])
s = self.settings['State eq.'](schedule_part[t],(s[:-1]+(i,) ) )
t += 1
states_part.append(s[1:])
states.append(states_part)
schedule.append(schedule_part)
return cost_calc, schedule, states
def solve(self):
"""
Solves the HJB. Returns the optimal value.
Path and further info is stored in the cache. Access it via
retrieve_decisions()
"""
return self._hjb_stoch_(self.settings['Initial state'])
def _hjb_stoch_(self,s):
        if s in self.settings['cache']:
return self.settings['cache'][s][0]
# check state bounds
for c,i in enumerate(s[1:-1]):
if i < self.settings['Lower state limits'][c] or i > self.settings['Upper state limits'][c]:
return float('inf')
#Check if reached time step limit:
if s[0] == self.settings['T']:
m = self.settings['Final value'](s,self.settings)
self.settings['cache'][s] = [m, np.nan]
return m
# Else enter recursion
else:
p=[]
for x in self.settings['x_set']:
#future = 0
# for i in range(self.settings['Len P']):
# #print self.hjb_stoch(self.settings['State eq.'](x,(s[0:-1]+(i,)))), self.settings['P'][s[-1],i]
# future += self.hjb_stoch(self.settings['State eq.'](x,(s[0:-1]+(i,))))*self.settings['P'][s[-1],i]
future = sum(self._hjb_stoch_(self.settings['State eq.'](x,(s[0:-1]+ (i,))))
*self.settings['P'][s[-1],i] for i in range(self.settings['Len P']))
p.append(self.settings['Cost eq.'](x,s) + future)
m = min(p)
            # p is indexed by position, so iterate the set in the same order.
            for idx, x in enumerate(self.settings['x_set']):
                if m == p[idx]:
                    pp = x
self.settings['cache'][s] = [m, pp]
return m
help(StochasticProgram)
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|
The cost of operating a pump on wind turbine power in a given error state is:
$$
cost(x,t,h,j) := \begin{cases} T(t) \times (x \times P_p - W(t,j)) & \text{if positive} \\ E_{xp} \times (x \times P_p - W(t,j)) & \text{if negative}\end{cases}
$$
where $W(t,j)$ is the wind power output at time $t$ with an error state $j$.
|
# Convention s = t,h,j
def stoch_simple_state(x,s):
#print s
if x == 0:
return (s[0]+1,s[1]-1,s[2])
elif x == 1:
return (s[0]+1,s[1]+1,s[2])
elif x == 2:
return (s[0]+1,s[1]+1.5,s[2])
def err_corr_wind_power_cost(x,s):
Tariff = [5,5,5,5,5,8,8,8,8,8,12,12,12,12,12,50,50,50,50,20,20,6,5,5]
Wind = [46, 1, 3, 36, 30, 19, 9, 26, 35, 5, 49, 3, 6, 36, 43, 36, 14,
34, 2, 0, 0, 30, 13, 36]
diff = np.array([-1,0,1])*3
Export_price = 5.5
wind_out = Wind[s[0]]+diff[s[2]]
if wind_out <= 0:
wind_out = 0
power_con = x*60-wind_out
if power_con >= 0:
return power_con*Tariff[s[0]]
else:
return power_con*Export_price
def val_T(s,settings):
if s[1] < settings['Initial state'][1]:
return float('inf')
else:
return 0
transition = np.array([[0.4,0.5,0.1],[0.2,0.6,0.2],[0.1,0.5,0.4]])
pumping_stoch = StochasticProgram()
pumping_stoch.add_decisions_set({0,1,2})
pumping_stoch.add_cost_function(err_corr_wind_power_cost)
pumping_stoch.add_state_eq(stoch_simple_state)
pumping_stoch.add_final_value_expression(val_T)
pumping_stoch.add_state_limits(lower=[0,0],upper = [200,3])
pumping_stoch.set_inital_state([100,1])
pumping_stoch.set_step_number(24)
pumping_stoch.add_transition_matrix(transition)
#print pumping_stoch.settings
pumping_stoch.solve()
#pumping_stoch.retrieve_decisions()
pumping_stoch.retrieve_decisions()
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|
The cost of operating a pump with a given wind turbine power input is given by:
$$
cost(x,t,h) := \begin{cases} T(t) \times (x \times P_p - W(t)) & \text{if positive} \\ E_{xp} \times (x \times P_p - W(t)) & \text{if negative}\end{cases}
$$
where $x$ is the decision variable, $W(t)$ is the wind turbine output in time step $t$, $P_p$ is the pump power, and $E_{xp}$ is the export price.
|
def wind_power_cost(x,s):
"""Very simple cost function for a pump with wind turbine power"""
Tariff = [5,5,5,5,5,8,8,8,8,8,12,12,12,12,12,50,50,50,50,20,20,6,5,5]
Wind = [46, 1, 3, 36, 30, 19, 9, 26, 35, 5, 49, 3, 6, 36, 43, 36, 14,
34, 2, 0, 0, 30, 13, 36]
Export_price = 5.5
power_con = x*60-Wind[s[0]]
if power_con >= 0:
return power_con*Tariff[s[0]]
else:
return power_con*Export_price
def stoch_wind_power_cost(x,s):
Tariff = [5,5,5,5,5,8,8,8,8,8,12,12,12,12,12,50,50,50,50,20,20,6,5,5]
Wind = [46, 1, 3, 36, 30, 19, 9, 26, 35, 5, 49, 3, 6, 36, 43, 36, 14,
34, 2, 0, 0, 30, 13, 36]
Export_price = 5.5
wind_out = sum(Wind[s[0]]*i for i in settings['P'][s[2]])
power_con = x*60-wind_out
if power_con >= 0:
return power_con*Tariff[s[0]]
else:
return power_con*Export_price
|
Dynamic programming class.ipynb
|
icfly2/hjb_solvers
|
gpl-3.0
|