text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# Store tracts and points in PostGIS
...for a fast spatial-join of points to tracts.
First, install postgres, postgis, and psycopg2. Then create the database from command prompt if it doesn't already exist:
```
createdb -U postgres points_tracts
psql -U postgres -d points_tracts -c "CREATE EXTENSION postgis;"
```
More info in the psycopg2 docs: http://initd.org/psycopg/docs/usage.html
```
import geopandas as gpd
import pandas as pd
import psycopg2
from shapely.geometry import Point
from keys import pg_user, pg_pass, pg_host, pg_port, pg_db
# work everywhere in WGS84 lat/lng so points and tracts share one CRS
crs = {'init' : 'epsg:4326'}
%%time
# load tracts and project to 4326
tracts = gpd.read_file('data/us_census_tracts_2014')
tracts = tracts.to_crs(crs)
print(len(tracts))
%%time
# load points and set initial crs to 4326
# each row's lng/lat pair becomes a shapely Point geometry
points = pd.read_csv('data/points-dataset.csv')
geometry = points.apply(lambda row: Point((row['lng'], row['lat'])), axis=1)
points = gpd.GeoDataFrame(points, geometry=geometry, crs=crs)
print(len(points))
# both layers must share a CRS before any spatial join
assert tracts.crs == points.crs
# srid is the numeric spatial reference ID PostGIS uses
# NOTE(review): str.strip('epsg:') strips any of the characters e/p/s/g/:
# from both ends, not the literal prefix -- it happens to work for
# 'epsg:4326' but is fragile; consider split(':')[1]. TODO confirm.
srid = tracts.crs['init'].strip('epsg:')
#points = points.sample(1000)
#tracts = tracts[['GEOID', 'ALAND', 'geometry']].sample(1000)
```
## Upload tracts and points to PostGIS
```
# open a connection to the PostGIS-enabled database; credentials are kept
# out of the notebook in keys.py
connection = psycopg2.connect(database=pg_db,
                              user=pg_user,
                              password=pg_pass,
                              host=pg_host,
                              port=pg_port)
cursor = connection.cursor()
# list all tables
# (regular relations only; the regex excludes pg_* and sql_* system catalogs)
cursor.execute("select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)'")
cursor.fetchall()
```
#### add tracts table
```
# drop tracts table if it already exists, then create tracts table
cursor.execute("DROP TABLE IF EXISTS tracts")
cursor.execute("CREATE TABLE tracts (id SERIAL PRIMARY KEY, geoid VARCHAR NOT NULL, aland BIGINT NOT NULL)")
# AddGeometryColumn registers the geometry column in PostGIS metadata;
# the GiST spatial index is what makes the later point-in-polygon join fast
cursor.execute("SELECT AddGeometryColumn ('tracts', 'geom', %s, 'MULTIPOLYGON', 2)", [srid])
cursor.execute("CREATE INDEX tract_index ON tracts USING GIST(geom)")
connection.commit()
%%time
cursor.execute("DELETE FROM tracts")
# insert each tract into the tracts table one at a time
for label, row in tracts.iterrows():
    geoid = row['GEOID']
    aland = row['ALAND']
    # WKT is the text form that ST_GeomFromText parses back into geometry
    geometry_wkt = row['geometry'].wkt
    # ST_Multi coerces plain POLYGON rows to MULTIPOLYGON to match the column type
    query = """INSERT INTO tracts (geoid, aland, geom)
VALUES (%s, %s, ST_Multi(ST_GeomFromText(%s, %s)))"""
    data = (geoid, aland, geometry_wkt, srid)
    cursor.execute(query, data)
connection.commit()
```
#### add points table
```
# drop points table if it already exists, then create points table
cursor.execute("DROP TABLE IF EXISTS points")
cursor.execute("""CREATE TABLE points (id SERIAL PRIMARY KEY,
date VARCHAR NOT NULL,
region VARCHAR NOT NULL,
bedrooms INTEGER,
rent REAL,
sqft REAL)""")
# register the geometry column and build the spatial index used by the join
cursor.execute("SELECT AddGeometryColumn ('points', 'geom', %s, 'POINT', 2)", [srid])
cursor.execute("CREATE INDEX point_index ON points USING GIST(geom)")
connection.commit()
%%time
cursor.execute("DELETE FROM points")
# insert each point into the points table one at a time
for label, row in points.iterrows():
    date = row['date']
    region = row['region']
    bedrooms = row['bedrooms']
    rent = row['rent']
    sqft = row['sqft']
    geometry_wkt = row['geometry'].wkt
    # bedrooms can be null, but must be None for psycopg2 to insert it as a null value, not a 'NaN' string
    if pd.isnull(bedrooms):
        bedrooms = None
    query = """
INSERT INTO points (date, region, bedrooms, rent, sqft, geom)
VALUES (%s, %s, %s, %s, %s, ST_GeomFromText(%s, %s))
"""
    data = (date, region, bedrooms, rent, sqft, geometry_wkt, srid)
    cursor.execute(query, data)
connection.commit()
```
#### optimize the database
```
%%time
# vacuum and analyze the database to optimize it after building indices and inserting rows
# VACUUM cannot run inside a transaction block, so temporarily switch the
# connection to autocommit (isolation level 0), then restore the old level
original_isolation_level = connection.isolation_level
connection.set_isolation_level(0)
cursor.execute("VACUUM ANALYZE")
connection.commit()
connection.set_isolation_level(original_isolation_level)
```
#### verify SRIDs, row counts, and data
```
# look up the SRIDs
# both geometry columns should report the SRID used at load time (4326)
cursor.execute("""SELECT
Find_SRID('public', 'tracts', 'geom') as tracts_srid,
Find_SRID('public', 'points', 'geom') as points_srid""")
cursor.fetchall()
# row count should match len(tracts) printed at load time
cursor.execute("SELECT count(*) AS exact_count FROM tracts")
rows = cursor.fetchall()
rows[0][0]
# spot-check a few tract rows, converting geometry back to WKT for display
cursor.execute("SELECT geoid, aland, ST_AsText(geom) FROM tracts LIMIT 3")
rows = cursor.fetchall()
gpd.GeoDataFrame(rows, columns=['GEOID', 'ALAND', 'geometry'])
# row count should match len(points) printed at load time
cursor.execute("SELECT count(*) AS exact_count FROM points")
rows = cursor.fetchall()
rows[0][0]
# spot-check a few point rows
cursor.execute("""SELECT date, region, bedrooms, rent, sqft, ST_AsText(geom)
FROM points LIMIT 3""")
rows = cursor.fetchall()
gpd.GeoDataFrame(rows, columns=['date', 'region', 'bedrooms', 'rent', 'sqft', 'geometry'])
```
## all done
```
cursor.close()
connection.close()
```
| github_jupyter |
# Load PyTorch model
In this tutorial, you learn how to load an existing PyTorch model and use it to run a prediction task.
We will run the inference in DJL way with [example](https://pytorch.org/hub/pytorch_vision_resnet/) on the pytorch official website.
## Preparation
This tutorial requires the installation of Java Kernel. For more information on installing the Java Kernel, see the [README](https://github.com/deepjavalibrary/djl/blob/master/jupyter/README.md).
```
// %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.14.0
%maven ai.djl.pytorch:pytorch-engine:0.14.0
%maven org.slf4j:slf4j-api:1.7.32
%maven org.slf4j:slf4j-simple:1.7.32
%maven net.java.dev.jna:jna:5.8.0
import java.nio.file.*;
import java.awt.image.*;
import ai.djl.*;
import ai.djl.inference.*;
import ai.djl.modality.*;
import ai.djl.modality.cv.*;
import ai.djl.modality.cv.util.*;
import ai.djl.modality.cv.transform.*;
import ai.djl.modality.cv.translator.*;
import ai.djl.repository.zoo.*;
import ai.djl.translate.*;
import ai.djl.training.util.*;
```
## Step 1: Prepare your model
This tutorial assumes that you have a TorchScript model.
DJL only supports the TorchScript format for loading models from PyTorch, so other models will need to be [converted](https://github.com/deepjavalibrary/djl/blob/master/docs/pytorch/how_to_convert_your_model_to_torchscript.md).
A TorchScript model includes the model structure and all of the parameters.
We will be using a pre-trained `resnet18` model. First, use the `DownloadUtils` to download the model files and save them in the `build/pytorch_models` folder
```
DownloadUtils.download("https://djl-ai.s3.amazonaws.com/mlrepo/model/cv/image_classification/ai/djl/pytorch/resnet/0.0.1/traced_resnet18.pt.gz", "build/pytorch_models/resnet18/resnet18.pt", new ProgressBar());
```
In order to do image classification, you will also need the synset.txt file, which stores the classification class labels. We need the synset containing the ImageNet labels with which resnet18 was originally trained.
```
DownloadUtils.download("https://djl-ai.s3.amazonaws.com/mlrepo/model/cv/image_classification/ai/djl/pytorch/synset.txt", "build/pytorch_models/resnet18/synset.txt", new ProgressBar());
```
## Step 2: Create a Translator
We will create a transformation pipeline which maps the transforms shown in the [PyTorch example](https://pytorch.org/hub/pytorch_vision_resnet/).
```python
...
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
...
```
Then, we will use this pipeline to create the [`Translator`](https://javadoc.io/static/ai.djl/api/0.14.0/index.html?ai/djl/translate/Translator.html)
```
// Build a DJL translator mirroring the torchvision preprocessing pipeline:
// resize -> center-crop -> tensor -> per-channel normalization.
Translator<Image, Classifications> translator = ImageClassificationTranslator.builder()
        .addTransform(new Resize(256))
        .addTransform(new CenterCrop(224, 224))
        .addTransform(new ToTensor())
        // per-channel mean / std values (same constants as the PyTorch example)
        .addTransform(new Normalize(
            new float[] {0.485f, 0.456f, 0.406f},
            new float[] {0.229f, 0.224f, 0.225f}))
        // convert the raw logits to probabilities for readable output
        .optApplySoftmax(true)
        .build();
```
## Step 3: Load your model
Next, we add some search criteria to find the resnet18 model and load it. In this case, we need to tell `Criteria` where to locate the model by calling `.optModelPath()` API.
```
// Criteria tell the model zoo how to locate and load the model:
// input/output types, the local model path, and our translator.
Criteria<Image, Classifications> criteria = Criteria.builder()
        .setTypes(Image.class, Classifications.class)
        .optModelPath(Paths.get("build/pytorch_models/resnet18"))
        .optOption("mapLocation", "true") // this model requires mapLocation for GPU
        .optTranslator(translator)
        .optProgress(new ProgressBar()).build();
ZooModel model = criteria.loadModel();
```
## Step 4: Load image for classification
We will use a sample dog image to run our prediction on.
```
var img = ImageFactory.getInstance().fromUrl("https://raw.githubusercontent.com/pytorch/hub/master/images/dog.jpg");
img.getWrappedImage()
```
## Step 5: Run inference
Lastly, we will need to create a predictor using our model and translator. Once we have a predictor, we simply need to call the predict method on our test image.
```
Predictor<Image, Classifications> predictor = model.newPredictor();
Classifications classifications = predictor.predict(img);
classifications
```
## Summary
Now, you can load any TorchScript model and run inference using it.
You might also want to check out [load_mxnet_model.ipynb](https://github.com/deepjavalibrary/djl/blob/master/jupyter/load_mxnet_model.ipynb) which demonstrates loading a local model directly instead of through the Model Zoo API.
To optimize inference performance, you might check out [how_to_optimize_inference_performance](https://github.com/deepjavalibrary/djl/blob/master/docs/pytorch/how_to_optimize_inference_performance.md).
| github_jupyter |
# Chapter 4 : Statistics and Linear Algebra
# basic descriptive statistics
```
%matplotlib inline
import numpy as np
from scipy.stats import scoreatpercentile
import pandas as pd
data = pd.read_csv("co2.csv", index_col=0, parse_dates=True)
co2 = np.array(data.co2)
print("The statistical valus for amounts of co2 in atmosphere : \n")
print("Max method : ", co2.max())
print("Max function : ", np.max(co2))
print("Min method : ", co2.min())
print("Min function : ", np.min(co2))
print("Mean method : ", co2.mean())
print("Mean function : ", np.mean(co2))
print("Std method : ", co2.std())
print("Std function : ", np.std(co2))
print("Median : ", np.median(co2))
print("Score at percentile 50 : ", scoreatpercentile(co2, 50))
```
# Linear Algebra
## Inverting a matrix
```
# Invert a 3x3 matrix and verify the result numerically.
# np.mat was deprecated and removed in NumPy 2.0; np.asmatrix is the
# supported equivalent and accepts the same MATLAB-style string syntax.
A = np.asmatrix("2 4 6;4 2 6;10 -4 18")
print("A\n", A)
inverse = np.linalg.inv(A)
print("inverse of A\n", inverse)
# A * A^-1 should be (approximately) the 3x3 identity
print("Check\n", A * inverse)
# the residual from the exact identity shows accumulated floating-point error
print("Error\n", A * inverse - np.eye(3))
```
## Solving linear systems
```
# Solve the linear system A x = b, then verify by substituting back.
# np.asmatrix replaces the np.mat alias removed in NumPy 2.0.
A = np.asmatrix("1 -2 1;0 2 -8;-4 5 9")
print("A\n", A)
b = np.array([0, 8, -9])
print("b\n", b)
x = np.linalg.solve(A, b)
print("Solution", x)
# A . x should reproduce b (up to floating-point error)
print("Check\n", np.dot(A , x))
```
## Finding eigenvalues and eigenvectors
```
# Compute eigenvalues/eigenvectors of a 2x2 matrix and verify the defining
# relation A v = lambda v column by column.
# np.asmatrix replaces the np.mat alias removed in NumPy 2.0.
A = np.asmatrix("3 -2;1 0")
print("A\n", A)
print("Eigenvalues", np.linalg.eigvals(A))
eigenvalues, eigenvectors = np.linalg.eig(A)
print("First tuple of eig", eigenvalues)
print("Second tuple of eig\n", eigenvectors)
for i in range(len(eigenvalues)):
    # "Left" (A v) and "Right" (lambda v) should agree for each pair
    print("Left", np.dot(A, eigenvectors[:,i]))
    print("Right", eigenvalues[i] * eigenvectors[:,i])
```
# random numbers
## binomial distribution
```
import numpy as np
from matplotlib.pyplot import plot, show
# simulate a gambler's bankroll: 10000 rounds of 9 coin flips, start at 1000
cash = np.zeros(10000)
cash[0] = 1000
outcome = np.random.binomial(9, 0.5, size=len(cash))
for i in range(1, len(cash)):
    # fewer than 5 heads out of 9 loses one unit, 5-9 heads wins one unit
    if outcome[i] < 5:
        cash[i] = cash[i - 1] - 1
    elif outcome[i] < 10:
        cash[i] = cash[i - 1] + 1
    else:
        # unreachable: binomial(9, .) can never exceed 9
        # NOTE(review): str + array would raise TypeError if this ever ran;
        # harmless only because the branch is dead
        raise AssertionError("Unexpected outcome " + outcome)
print(outcome.min(), outcome.max())
plot(np.arange(len(cash)), cash)
show()
```
## normal distribution
```
import numpy as np
import matplotlib.pyplot as plt
# Draw N standard-normal samples and overlay the theoretical pdf on a
# normalized histogram. hist's `normed` argument was removed in
# Matplotlib 3.1; `density=True` is its replacement with the same meaning.
N=10000
normal_values = np.random.normal(size=N)
dummy, bins, dummy = plt.hist(normal_values, int(np.sqrt(N)), density=True, lw=1)
sigma = 1
mu = 0
# Gaussian pdf evaluated at the histogram bin edges
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu)**2 / (2 * sigma**2) ),lw=2)
plt.show()
```
## normality test with scipy
```
import numpy as np
from scipy.stats import shapiro
from scipy.stats import anderson
from scipy.stats import normaltest
# load the flu-trends counts; empty cells are converted to 0.0
flutrends = np.loadtxt("goog_flutrends.csv", delimiter=',', usecols=(1,), skiprows=1, converters = {1: lambda s: float(s or 0)}, unpack=True)
N = len(flutrends)
# reference samples of the same length: true normals and all-zeros
normal_values = np.random.normal(size=N)
zero_values = np.zeros(N)
# run three normality tests on the known-normal reference vs. the flu data
print("Normal Values Shapiro", shapiro(normal_values))
#print("Zeroes Shapiro", shapiro(zero_values))
print("Flu Shapiro", shapiro(flutrends))
print("Normal Values Anderson", anderson(normal_values))
#print("Zeroes Anderson", anderson(zero_values))
print("Flu Anderson", anderson(flutrends))
print("Normal Values normaltest", normaltest(normal_values))
#print("Zeroes normaltest", normaltest(zero_values))
print("Flu normaltest", normaltest(flutrends))
```
# Numpy masked array
```
import numpy
import scipy
import matplotlib.pyplot as plt
# NOTE(review): scipy.misc.face() was deprecated and later removed (moved to
# scipy.datasets.face) -- confirm the installed SciPy version still has it
face = scipy.misc.face()
# random 0/1 mask with the same shape as the image
random_mask = numpy.random.randint(0, 2, size=face.shape)
plt.subplot(221)
plt.title("Original")
plt.imshow(face)
plt.axis('off')
# masked entries are ignored by masked-array operations (and by imshow)
masked_array = numpy.ma.array(face, mask=random_mask)
plt.subplot(222)
plt.title("Masked")
plt.imshow(masked_array)
plt.axis('off')
plt.subplot(223)
plt.title("Log")
# ma.log masks the zero-valued pixels where log is undefined
plt.imshow(numpy.ma.log(face).astype("float32"))
plt.axis('off')
plt.subplot(224)
plt.title("Log Masked")
plt.imshow(numpy.ma.log(masked_array).astype("float32"))
plt.axis('off')
plt.show()
```
# Disregarding negative and extreme values
```
import numpy as np
from datetime import date
import sys
import matplotlib.pyplot as plt
# load the salary column of the MLB 2008 dataset
salary = np.loadtxt("MLB2008.csv", delimiter=',', usecols=(1,), skiprows=1, unpack=True)
# flip the sign of every third salary so its log is undefined (and masked)
triples = np.arange(0, len(salary), 3)
print("Triples", triples[:10], "...")
signs = np.ones(len(salary))
print("Signs", signs[:10], "...")
signs[triples] = -1
print("Signs", signs[:10], "...")
# ma.log masks the negative entries instead of emitting NaNs/warnings
ma_log = np.ma.log(salary * signs)
print("Masked logs", ma_log[:10], "...")
# mask values more than one standard deviation from the mean ("extreme")
dev = salary.std()
avg = salary.mean()
inside = np.ma.masked_outside(salary, avg - dev, avg + dev)
print("Inside", inside[:10], "...")
plt.subplot(311)
plt.title("Original")
plt.plot(salary)
plt.subplot(312)
plt.title("Log Masked")
# exp undoes the log, so only the masking effect remains visible
plt.plot(np.exp(ma_log))
plt.subplot(313)
plt.title("Not Extreme")
plt.plot(inside)
plt.subplots_adjust(hspace=.9)
plt.show()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
# load the Z-Alizadeh Sani CAD dataset
df = pd.read_csv('Z_sani.csv')
# 1: oh 2: or 3:mm 4: std 5:target
# per-column encoding plan: one-hot, ordinal, min-max scale, standardize, target
encode_list = [3,3,3,1,3,1,1,1,1,1,1,1,1,1,1,1,1,3,3,
               1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
               4,4,4,4,4,4,4,3,3,3,4,4,3,4,4,1,2,5]
df.head()
from sklearn.model_selection import train_test_split
X = df.drop('Cath', axis=1)
y = df['Cath']
# NOTE(review): test_size=0.00004 of ~303 rows rounds to a 1-row test set --
# presumably intentional to keep nearly all rows for the EDA; confirm
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.00004)
#####
####
from sklearn.preprocessing import OneHotEncoder,OrdinalEncoder,MinMaxScaler,StandardScaler,LabelEncoder
# accumulators are seeded with the raw first column (dropped again after the
# loop); encoded columns are appended one source column at a time
# NOTE(review): OneHotEncoder's `sparse` kwarg was renamed `sparse_output`
# in scikit-learn 1.2 and removed in 1.4 -- confirm the pinned version
train_col_to_drop = pd.DataFrame({df.columns[0]:X_train[df.columns[0]]})
test_col_to_drop = pd.DataFrame({df.columns[0]:X_test[df.columns[0]]})
col_names = []
for i in range(55):
    # code 1: one-hot encode; unseen test categories become all-zero rows
    if encode_list[i] == 1:
        train = pd.DataFrame({df.columns[i]:X_train[df.columns[i]]})
        test = pd.DataFrame({df.columns[i]:X_test[df.columns[i]]})
        cd = OneHotEncoder(sparse=False,handle_unknown = "ignore")
        X_t_train = cd.fit_transform(train)
        X_t_test = cd.transform(test)
        col_name = train.columns[0]
        num_col = X_t_train.shape[1]
        # one output column per category: name0, name1, ...
        for j in range(num_col):
            col_names.append(col_name + str(j))
    # code 2: ordinal-encode severity levels in their natural order
    if encode_list[i] == 2:
        train = pd.DataFrame({df.columns[i]:X_train[df.columns[i]]})
        test = pd.DataFrame({df.columns[i]:X_test[df.columns[i]]})
        cd = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
        X_t_train = cd.fit_transform(train)
        X_t_test = cd.transform(test)
        col_name = train.columns[0]
        col_names.append(col_name)
    # code 3: scale to [0, 1]
    if encode_list[i] == 3:
        train = pd.DataFrame({df.columns[i]:X_train[df.columns[i]]})
        test = pd.DataFrame({df.columns[i]:X_test[df.columns[i]]})
        cd = MinMaxScaler()
        X_t_train = cd.fit_transform(train)
        X_t_test = cd.transform(test)
        col_name = train.columns[0]
        col_names.append(col_name)
    # code 4: standardize to zero mean / unit variance
    if encode_list[i] == 4:
        train = pd.DataFrame({df.columns[i]:X_train[df.columns[i]]})
        test = pd.DataFrame({df.columns[i]:X_test[df.columns[i]]})
        cd = StandardScaler()
        X_t_train = cd.fit_transform(train)
        X_t_test = cd.transform(test)
        col_name = train.columns[0]
        col_names.append(col_name)
    # encoders are fit on the train split only, then applied to test
    train_col_to_drop = np.concatenate((train_col_to_drop,X_t_train),axis=1)
    test_col_to_drop = np.concatenate((test_col_to_drop,X_t_test),axis=1)
# encode the target labels (fit on train, applied to test)
le = LabelEncoder()
y_train_le = le.fit_transform(y_train)
y_test_le = le.transform(y_test)
# drop the seed column (position 0) and restore readable column names
train_data = pd.DataFrame(train_col_to_drop).drop(0,axis=1)
test_data = pd.DataFrame(test_col_to_drop).drop(0,axis=1)
train_data.columns = col_names
test_data.columns = col_names
train_data['y'] = y_train_le
test_data['y'] = y_test_le
# train_data.to_csv("train.csv")
# test_data.to_csv("preprocess_testdata.csv")
ftr_mis = []
for i in df.columns:
ftr_mis.append(np.mean(df[i].isnull()))
print(ftr_mis)
from matplotlib import pylab as plt
df_preprocessed = train_data
corrmat = df_preprocessed.corr()
all_cols = corrmat.sort_values('y',ascending=False)['y'].index
cols = all_cols[1:8] # positively correlated features
#cols = ['SalePrice']+[col for col in all_cols if '_nan' not in col][:-10:-1] # negatively correlated features
pd.plotting.scatter_matrix(df_preprocessed[cols],c = df_preprocessed['y'], figsize=(15, 15), marker='o',
hist_kwds={'bins': 20}, s=100, alpha=.6)
plt.show()
all_cols = corrmat.sort_values('y',ascending=False)['y'].index
cols = all_cols[:-8:-1] # positively correlated features
#cols = ['SalePrice']+[col for col in all_cols if '_nan' not in col][:-10:-1] # negatively correlated features
pd.plotting.scatter_matrix(df_preprocessed[cols],c = df_preprocessed['y'], figsize=(15, 15), marker='o',
hist_kwds={'bins': 20}, s=100, alpha=.6)
plt.show()
corrmat = df_preprocessed.corr()
# use the absolute value of the correlation matrix for sorting!
all_cols = np.abs(corrmat).sort_values('y',ascending=False)['y'].index
cols = all_cols[:10] # positively correlated features
cm = corrmat.loc[cols,cols]
plt.figure(figsize=(10,10))
plt.matshow(cm,vmin=-1,vmax=1,cmap='seismic',fignum=0)
plt.colorbar(label='corr. coeff.')
plt.xticks(np.arange(cm.shape[0]),list(cols),rotation=90)
plt.yticks(np.arange(cm.shape[0]),list(cols))
plt.tight_layout()
plt.show()
all_cols
count_matrix = df_preprocessed.groupby(['Typical Chest Pain0', 'y']).size().unstack()
# print(count_matrix)
count_matrix_norm = count_matrix.div(count_matrix.sum(axis=1),axis=0)
# print(count_matrix_norm)
plt.matshow(count_matrix_norm.T,vmin=0,vmax=1)
plt.xticks(np.arange(count_matrix_norm.shape[0]),count_matrix_norm.index,rotation=90)
plt.yticks(np.arange(count_matrix_norm.shape[1]),count_matrix_norm.columns)
plt.ylabel('Typical Chest Pain')
plt.xlabel('CAD')
plt.colorbar(label='fraction')
plt.text(0.65, 0.8, "0.55", fontsize=18,color='red')
plt.text(0.65, 0.3, "0.06", fontsize=18,color='red')
plt.text(-0.1, 0.3, "0.94", fontsize=18,color='red')
plt.text(-0.1, 0.8, "0.45", fontsize=18,color='red')
# plt.tight_layout()
plt.show()
import plotly.graph_objects as go
fig = go.Figure(data=go.Heatmap(
z=count_matrix_norm,
x=['No CAD', 'CAD'],
y=['No Chest Pain', 'Chest Pain']))
fig.show()
count_matrix_norm[0][0]
cad_df = pd.DataFrame( {'Chest Pain':[ "yes","no","yes","no" ],
'rate':[count_matrix_norm[0][0],count_matrix_norm[1][1],count_matrix_norm[1][0],count_matrix_norm[0][1]],
"CAD": [ 'yes','yes','no','no' ]
} )
cad_df
import plotly.express as px
import plotly
fig = px.bar(cad_df,x='Chest Pain',y="rate",color="CAD")
# plotly.io.orca.config.executable = '/path/to/orca'
# plotly.io.orca.config.save()
fig.update_layout(
yaxis_title="Ratio"
)
fig.show()
# fig.write_image("fig1.png")
lab_df = pd.DataFrame( {'CAD':[ "yes","no" ],
'ratio':[np.mean(df['Cath'] == 'Cad'),np.mean(df['Cath'] == 'Normal')],
} )
np.mean(df['Cath'] == 'Cad')
np.mean(df['Cath'] == 'Normal')
import plotly.express as px
fig = px.bar(lab_df,x='CAD',y="ratio",color="CAD")
fig.show()
plt.hist(df_preprocessed['Age'][df_preprocessed['y'] == 1],alpha=0.6,normed=True)
plt.hist(df_preprocessed['Age'][df_preprocessed['y'] == 0],alpha=0.6,normed=True)
plt.ylabel('density')
plt.xlabel('Age')
plt.legend()
plt.show()
import plotly.express as px
# fig = px.histogram(df, x="Age", y="Cath", color="Cath",histnorm='probability density')
# fig.update_traces(opacity=0.5)
# fig.show()
import plotly.graph_objects as go
x0 = df['Age'][df['Cath'] == 'Cad']
# Add 1 to shift the mean of the Gaussian distribution
x1 = df['Age'][df['Cath'] == 'Normal']
fig = go.Figure()
fig.add_trace(go.Histogram(x=x0,histnorm='probability density',name='CAD'))
fig.add_trace(go.Histogram(x=x1,histnorm='probability density',name='Normal'))
# Overlay both histograms
fig.update_layout(barmode='overlay')
fig.update_layout(
xaxis_title="Age",
yaxis_title="Density"
# font=dict(
# family="Courier New, monospace",
# size=18,
# # color="#7f7f7f"
# )
)
# Reduce opacity to see both histograms
fig.update_traces(opacity=0.7)
fig.show()
df[['EF-TTE','Cath']].boxplot(by='Cath')
# plt.plot([50, 0], [1, 50], 'k-', lw=.5,color='red')
plt.show()
df2 = df.copy()
for i in range(len(df['Cath'])):
if df['Cath'][i] == 'Cad':
df2['Cath'][i] = 'yes'
else:
df2['Cath'][i] = 'no'
df2['Cath'].describe()
fig = px.box(df2, y='EF-TTE', x='Cath', color='Cath', notched=True)
fig.update_layout(
xaxis_title="Cad"
)
fig.show()
from mpl_toolkits import mplot3d
x = df_preprocessed["Typical Chest Pain0"]
y = df_preprocessed["Age"]
z = df_preprocessed["EF-TTE"]
# "Age" "Atypical0" 'Region RWMA0' "EF-TTE"
ax = plt.axes(projection='3d')
ax.scatter(x, y, z, c=df_preprocessed["y"], cmap='viridis', linewidth=0.5)
import plotly.express as px
fig = px.scatter_3d(df_preprocessed, x= "Typical Chest Pain0", y="Age", z="EF-TTE",
color="y", symbol="y")
# "Age" "Atypical0" 'Region RWMA0' "EF-TTE" 'HTN0' "Typical Chest Pain0"
fig.show()
feature_names = X.columns
from sklearn.feature_selection import f_classif, mutual_info_classif
y = df_preprocessed['y'].values
X = df_preprocessed.drop(columns=['y'])
from sklearn.feature_selection import SelectKBest
m_select = SelectKBest(mutual_info_classif,k=10)
m_select.fit_transform(X,y)
m_feature = feature_names[m_select.get_support()]
print(m_feature)
f_select = SelectKBest(f_classif,k=10)
f_select.fit_transform(X,y)
f_feature = feature_names[f_select.get_support()]
print(f_feature)
df_preprocessed['Typical Chest Pain0']==1
max(df_preprocessed['EF-TTE'])
condn = (df_preprocessed['Typical Chest Pain0']==1).values * ((df_preprocessed['EF-TTE']-4.5*df_preprocessed['Age']+3.2)>=0).values
aa = df_preprocessed[condn]
a = aa[aa['y']==1].shape[0]
aa = df_preprocessed[np.logical_not(condn)]
b = aa[aa['y']==0].shape[0]
(a+b)/303
df.columns
df.head()
cat_ftr = ["Sex",'Obesity','CRF', 'CVA','Airway disease','Thyroid Disease', 'CHF', 'DLP',
'Weak Peripheral Pulse', 'Lung rales','Systolic Murmur','Diastolic Murmur','Dyspnea', 'Function Class',
'Atypical', 'Nonanginal', 'Exertional CP', 'LowTH Ang','LVH','Poor R Progression', 'BBB','Region RWMA']
bi_ftr = ["DM","HTN","Current Smoker",'EX-Smoker','FH','Edema','Typical Chest Pain',
'Q Wave','St Elevation', 'St Depression', 'Tinversion']
ord_ftr = ["VHD"]
cont_ftr = ["Age","Weight","Length","BMI",'BP', 'PR','FBS', 'CR', 'TG', 'LDL', 'HDL', 'BUN',
'ESR', 'HB', 'K', 'Na', 'WBC', 'Lymph', 'Neut', 'PLT', 'EF-TTE']
import pandas as pd
import numpy as np
df = pd.read_csv('Z_sani.csv')
X = df.drop('Cath', axis=1)
y = df['Cath']
y
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(["Cad","Normal"])
# LabelEncoder assigns alphabetically (Cad->0, Normal->1); *-1+1 flips the
# mapping so Cad (disease present) becomes the positive class 1
y1 = pd.DataFrame({"y":le.transform(y)*-1+1})
df['Cath'] = y1
y = df['Cath']
y
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, fbeta_score
from sklearn.metrics import confusion_matrix
def logit_pip(X,y,random_state,n_folds):
    """Evaluate an L1-regularized logistic regression with nested validation.

    Splits off a 30% test set, then runs stratified k-fold CV on the rest.
    Within each fold, preprocessing (standardize / one-hot / ordinal encode
    using the module-level cont_ftr/cat_ftr/bi_ftr/ord_ftr lists) is fit on
    the training part only, C is tuned over a log grid, and the fold-best
    model is scored on the held-out test set.

    Parameters
    ----------
    X, y : feature DataFrame and target Series
    random_state : sequence of two ints -- seeds for the train/test split
        and the CV shuffling
    n_folds : number of stratified CV folds

    Returns
    -------
    (CV_scores, test_scores, pre, rec, f13, best_c) : per-fold best CV
    accuracy, per-fold test accuracy/precision/recall/F-beta(1.3), and the
    best C found in the *last* fold.
    """
    X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = random_state[0])
    kf = StratifiedKFold(n_splits=n_folds,shuffle=True,random_state=random_state[1])
    CV_scores = []
    test_scores = []
    pre = []
    rec = []
    f13 = []
    cf = []
    for train_index, CV_index in kf.split(X_other,y_other):
        X_train, X_CV = X_other.iloc[train_index], X_other.iloc[CV_index]
        y_train, y_CV = y_other.iloc[train_index], y_other.iloc[CV_index]
        # fit the scalers/encoders on the training fold only
        scaler = StandardScaler()
        oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
        od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
        X_train_cont = X_train[cont_ftr]
        X_train_cat = X_train[cat_ftr]
        X_train_od = X_train[ord_ftr]
        X_train_cont = scaler.fit_transform(X_train_cont)
        X_train_cat = oh.fit_transform(X_train_cat)
        X_train_od = od.fit_transform(X_train_od)
        X_train = np.concatenate((X_train_cont,X_train_cat,
                                  np.array(X_train[bi_ftr]),X_train_od),axis=1)
        X_c_cont = X_CV[cont_ftr]
        X_c_cat = X_CV[cat_ftr]
        X_c_od = X_CV[ord_ftr]
        X_c_cont = scaler.transform(X_c_cont)
        X_c_cat = oh.transform(X_c_cat)
        X_c_od = od.transform(X_c_od)
        X_c = np.concatenate((X_c_cont,X_c_cat,
                              np.array(X_CV[bi_ftr]),X_c_od),axis=1)
        X_t_cont = X_test[cont_ftr]
        X_t_cat = X_test[cat_ftr]
        X_t_od = X_test[ord_ftr]
        X_t_cont = scaler.transform(X_t_cont)
        X_t_cat = oh.transform(X_t_cat)
        X_t_od = od.transform(X_t_od)
        X_t = np.concatenate((X_t_cont,X_t_cat,
                              np.array(X_test[bi_ftr]),X_t_od),axis=1)
        # tune C over a log-spaced grid, weakest penalty first
        cs = np.logspace(1,-3,num=50)
        train_score = []
        CV_score = []
        regs = []
        for c in cs:
            reg = LogisticRegression(penalty="l1",C=c,solver="saga",max_iter=10**4)
            reg.fit(X_train,y_train)
            train_score.append(accuracy_score(y_train,reg.predict(X_train)))
            CV_score.append(accuracy_score(y_CV,reg.predict(X_c)))
            regs.append(reg)
        # NOTE: best_c is overwritten each fold; the returned value is the
        # winner of the last fold only
        best_c = cs[np.argmax(CV_score)]
        reg = regs[np.argmax(CV_score)]
        CV_scores.append(np.max(CV_score))
        test_scores.append(accuracy_score(y_test,reg.predict(X_t)))
        pre.append(precision_score(y_test,reg.predict(X_t)))
        rec.append(recall_score(y_test,reg.predict(X_t)))
        # FIX: beta is keyword-only in current scikit-learn; passing 1.3
        # positionally raises TypeError there
        f13.append(fbeta_score(y_test,reg.predict(X_t),beta=1.3))
        # cf.append(confusion_matrix(y,reg.predict(X)))
    return (CV_scores,test_scores,pre,rec,f13,best_c)
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, fbeta_score
from sklearn.metrics import confusion_matrix
import pickle
# Average a normalized confusion matrix for the tuned logistic regression
# over 10 different random train/test splits.
rst = np.array([[0, 0],[0, 0]])
for i in range(1,11):
    X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = 10*i)
    # fit preprocessing on the training split only, then apply to test
    scaler = StandardScaler()
    oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
    od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
    X_train_cont = X_other[cont_ftr]
    X_train_cat = X_other[cat_ftr]
    X_train_od = X_other[ord_ftr]
    X_train_cont = scaler.fit_transform(X_train_cont)
    X_train_cat = oh.fit_transform(X_train_cat)
    X_train_od = od.fit_transform(X_train_od)
    X_train = np.concatenate((X_train_cont,X_train_cat,
                              np.array(X_other[bi_ftr]),X_train_od),axis=1)
    X_t_cont = X_test[cont_ftr]
    X_t_cat = X_test[cat_ftr]
    X_t_od = X_test[ord_ftr]
    X_t_cont = scaler.transform(X_t_cont)
    X_t_cat = oh.transform(X_t_cat)
    X_t_od = od.transform(X_t_od)
    X_t = np.concatenate((X_t_cont,X_t_cat,
                          np.array(X_test[bi_ftr]),X_t_od),axis=1)
    # C chosen from the logit_pip grid search
    reg = LogisticRegression(penalty="l1",C=0.7196,solver="saga",max_iter=10**4)
    reg.fit(X_train,y_other)
    # BUG FIX: the model was previously pickled BEFORE fitting, so logi.sav
    # held an unfitted estimator; dump after fit so the saved model is usable
    pickle.dump(reg, open("logi.sav", 'wb'))
    rst = rst + np.array(confusion_matrix(y_test,reg.predict(X_t))/sum(confusion_matrix(y_test,reg.predict(X_t))))
rst/10
ac, pre, rec, f13 = [], [], [], []
# repeat the full logit pipeline with 5 different seed pairs; each op holds
# per-fold scores, so the mean over folds is taken before aggregating
for i in range(1,6):
    op = logit_pip(X,y,[10*i,14*i],10)
    ac.append(np.mean(op[1]))
    pre.append(np.mean(op[2]))
    rec.append(np.mean(op[3]))
    f13.append(np.mean(op[4]))
print("Mean of test accuracy scores: " + str(np.mean(ac)))
print("Std of test accuracy scores: " + str(np.std(ac)))
print("Mean of test pre scores: " + str(np.mean(pre)))
print("Std of test pre scores: " + str(np.std(pre)))
print("Mean of test rec scores: " + str(np.mean(rec)))
print("Std of test rec scores: " + str(np.std(rec)))
print("Mean of test f13 scores: " + str(np.mean(f13)))
print("Std of test f13 scores: " + str(np.std(f13)))
# best C from the last run (last fold only -- see logit_pip)
print(op[5])
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
def rf_pip(X,y,random_state,n_folds):
    """Evaluate a random-forest classifier with nested validation.

    Mirrors logit_pip: 30% held-out test set, stratified k-fold CV on the
    remainder, a grid search over max_depth x min_samples_split inside each
    fold, and the fold-best model scored on the test set. Preprocessing uses
    the module-level cont_ftr/cat_ftr/bi_ftr/ord_ftr feature lists.

    Parameters
    ----------
    X, y : feature DataFrame and target Series
    random_state : sequence of three ints -- seeds for the train/test split,
        the CV shuffling, and the forest
    n_folds : number of stratified CV folds

    Returns
    -------
    (CV_scores, test_scores, pre, rec, f13, best_par) : per-fold metrics and
    the (max_depth, min_samples_split) pair chosen in the last fold.
    """
    X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = random_state[0])
    kf = StratifiedKFold(n_splits=n_folds,shuffle=True,random_state=random_state[1])
    CV_scores = []
    test_scores = []
    pre = []
    rec = []
    f13 = []
    cf = []
    for train_index, CV_index in kf.split(X_other,y_other):
        X_train, X_CV = X_other.iloc[train_index], X_other.iloc[CV_index]
        y_train, y_CV = y_other.iloc[train_index], y_other.iloc[CV_index]
        # fit the scalers/encoders on the training fold only
        scaler = StandardScaler()
        oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
        od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
        X_train_cont = X_train[cont_ftr]
        X_train_cat = X_train[cat_ftr]
        X_train_od = X_train[ord_ftr]
        X_train_cont = scaler.fit_transform(X_train_cont)
        X_train_cat = oh.fit_transform(X_train_cat)
        X_train_od = od.fit_transform(X_train_od)
        X_train = np.concatenate((X_train_cont,X_train_cat,
                                  np.array(X_train[bi_ftr]),X_train_od),axis=1)
        X_c_cont = X_CV[cont_ftr]
        X_c_cat = X_CV[cat_ftr]
        X_c_od = X_CV[ord_ftr]
        X_c_cont = scaler.transform(X_c_cont)
        X_c_cat = oh.transform(X_c_cat)
        X_c_od = od.transform(X_c_od)
        X_c = np.concatenate((X_c_cont,X_c_cat,
                              np.array(X_CV[bi_ftr]),X_c_od),axis=1)
        X_t_cont = X_test[cont_ftr]
        X_t_cat = X_test[cat_ftr]
        X_t_od = X_test[ord_ftr]
        X_t_cont = scaler.transform(X_t_cont)
        X_t_cat = oh.transform(X_t_cat)
        X_t_od = od.transform(X_t_od)
        X_t = np.concatenate((X_t_cont,X_t_cat,
                              np.array(X_test[bi_ftr]),X_t_od),axis=1)
        train_score = []
        CV_score = []
        regs = []
        # grid over tree depth and minimum samples required to split a node
        par = range(2,11)
        par_ls = []
        for i in par:
            for j in par:
                # BUG FIX: the second keyword was the invalid `1=j` (a syntax
                # error); min_samples_split=j matches the final model built
                # later in the notebook
                reg = RandomForestClassifier(random_state = random_state[2], max_depth=i,
                                             min_samples_split=j,n_estimators = 50)
                reg.fit(X_train,y_train)
                train_score.append(accuracy_score(y_train,reg.predict(X_train)))
                CV_score.append(accuracy_score(y_CV,reg.predict(X_c)))
                regs.append(reg)
        # BUG FIX: the second index used argmin; both row and column of the
        # best grid cell must come from argmax of the CV score
        best_par = (par[np.argmax(CV_score)//len(par)],par[np.argmax(CV_score)%len(par)])
        reg = regs[np.argmax(CV_score)]
        CV_scores.append(np.max(CV_score))
        test_scores.append(accuracy_score(y_test,reg.predict(X_t)))
        pre.append(precision_score(y_test,reg.predict(X_t)))
        rec.append(recall_score(y_test,reg.predict(X_t)))
        # beta is keyword-only in current scikit-learn
        f13.append(fbeta_score(y_test,reg.predict(X_t),beta=1.3))
        # cf.append(confusion_matrix(y,reg.predict(X)))
    return (CV_scores,test_scores,pre,rec,f13,best_par)
ac, pre, rec, f13 = [], [], [], []
# repeat the full random-forest pipeline with 5 different seed triples;
# average the per-fold scores of each run before aggregating
for i in range(1,6):
    op = rf_pip(X,y,[10*i,14*i,96*i],10)
    ac.append(np.mean(op[1]))
    pre.append(np.mean(op[2]))
    rec.append(np.mean(op[3]))
    f13.append(np.mean(op[4]))
print("Mean of test accuracy scores: " + str(np.mean(ac)))
print("Std of test accuracy scores: " + str(np.std(ac)))
print("Mean of test pre scores: " + str(np.mean(pre)))
print("Std of test pre scores: " + str(np.std(pre)))
print("Mean of test rec scores: " + str(np.mean(rec)))
print("Std of test rec scores: " + str(np.std(rec)))
print("Mean of test f13 scores: " + str(np.mean(f13)))
print("Std of test f13 scores: " + str(np.std(f13)))
# best (max_depth, min_samples_split) pair from the last run's last fold
print(op[5])
from sklearn.ensemble import RandomForestClassifier

# Accumulate the (column-)normalized confusion matrix of the tuned random
# forest over 10 independent 70/30 train/test splits, then average it.
rst = np.array([[0, 0], [0, 0]])
for i in range(1, 11):
    X_other, X_test, y_other, y_test = train_test_split(
        X, y, test_size=0.3, random_state=10 * i)
    # Preprocessors are fit on the training split only (no leakage).
    scaler = StandardScaler()
    oh = OneHotEncoder(sparse=False, handle_unknown="ignore")
    od = OrdinalEncoder(categories=[['N', 'mild', 'Moderate', 'Severe']])
    # Feature layout: continuous | one-hot | binary | ordinal (same order
    # as everywhere else in this notebook).
    X_train = np.concatenate((scaler.fit_transform(X_other[cont_ftr]),
                              oh.fit_transform(X_other[cat_ftr]),
                              np.array(X_other[bi_ftr]),
                              od.fit_transform(X_other[ord_ftr])), axis=1)
    X_t = np.concatenate((scaler.transform(X_test[cont_ftr]),
                          oh.transform(X_test[cat_ftr]),
                          np.array(X_test[bi_ftr]),
                          od.transform(X_test[ord_ftr])), axis=1)
    reg = RandomForestClassifier(random_state=14, max_depth=6,
                                 min_samples_split=2, n_estimators=50)
    reg.fit(X_train, y_other)
    # BUG FIX: pickle the model *after* fitting — the original dumped an
    # unfitted estimator to rf.sav.
    pickle.dump(reg, open("rf.sav", 'wb'))
    cm = confusion_matrix(y_test, reg.predict(X_t))
    # sum(cm) sums over rows, so each predicted-class column is normalized.
    rst = rst + np.array(cm / sum(cm))
rst / 10
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn import svm
from sklearn.metrics import accuracy_score
def svm_pip(X, y, random_state, n_folds):
    """Grid-search an RBF-kernel SVM over (C, gamma) with stratified CV.

    A 30% test set is split off first. In each stratified CV fold the
    preprocessors (standard scaler, one-hot encoder, ordinal encoder)
    are fit on the training portion only, every (C, gamma) pair from
    logspace(6, -2, 55) is tried, and the fold's best model is scored
    on the held-out test set.

    Parameters
    ----------
    X, y : feature DataFrame and label Series (binary target).
    random_state : sequence of ints; [0] seeds the train/test split and
        [1] the CV splitter (index [2] is unused — SVC here is deterministic).
    n_folds : number of stratified CV folds.

    Returns
    -------
    (CV_scores, test_scores, pre, rec, f13, best_par) — per-fold metric
    lists plus the (C, gamma) of the *last* fold's best model.
    """
    X_other, X_test, y_other, y_test = train_test_split(
        X, y, test_size=0.3, random_state=random_state[0])
    kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state[1])
    CV_scores = []
    test_scores = []
    pre = []
    rec = []
    f13 = []
    for train_index, CV_index in kf.split(X_other, y_other):
        X_train, X_CV = X_other.iloc[train_index], X_other.iloc[CV_index]
        y_train, y_CV = y_other.iloc[train_index], y_other.iloc[CV_index]
        # Fit preprocessors on the training fold only (no leakage);
        # feature layout: continuous | one-hot | binary | ordinal.
        scaler = StandardScaler()
        oh = OneHotEncoder(sparse=False, handle_unknown="ignore")
        od = OrdinalEncoder(categories=[['N', 'mild', 'Moderate', 'Severe']])
        X_tr = np.concatenate((scaler.fit_transform(X_train[cont_ftr]),
                               oh.fit_transform(X_train[cat_ftr]),
                               np.array(X_train[bi_ftr]),
                               od.fit_transform(X_train[ord_ftr])), axis=1)
        X_c = np.concatenate((scaler.transform(X_CV[cont_ftr]),
                              oh.transform(X_CV[cat_ftr]),
                              np.array(X_CV[bi_ftr]),
                              od.transform(X_CV[ord_ftr])), axis=1)
        X_t = np.concatenate((scaler.transform(X_test[cont_ftr]),
                              oh.transform(X_test[cat_ftr]),
                              np.array(X_test[bi_ftr]),
                              od.transform(X_test[ord_ftr])), axis=1)
        CV_score = []
        regs = []
        par = np.logspace(6, -2, num=55)
        for i in par:            # C (regularization strength)
            for j in par:        # gamma (RBF kernel width)
                reg = svm.SVC(C=i, gamma=j)
                reg.fit(X_tr, y_train)
                CV_score.append(accuracy_score(y_CV, reg.predict(X_c)))
                regs.append(reg)
        # BUG FIX: both coordinates of the best (C, gamma) pair must come
        # from argmax — the original used argmin for the gamma index.
        best_idx = int(np.argmax(CV_score))
        best_par = (par[best_idx // len(par)], par[best_idx % len(par)])
        reg = regs[best_idx]
        CV_scores.append(np.max(CV_score))
        y_pred = reg.predict(X_t)
        test_scores.append(accuracy_score(y_test, y_pred))
        pre.append(precision_score(y_test, y_pred))
        rec.append(recall_score(y_test, y_pred))
        # beta by keyword: recent scikit-learn makes beta keyword-only
        f13.append(fbeta_score(y_test, y_pred, beta=1.3))
    return (CV_scores, test_scores, pre, rec, f13, best_par)
# Repeat the SVM pipeline (svm_pip) with 5 different seed triples and report
# mean/std of the held-out test metrics.
# NOTE(review): indentation lost in extraction; the loop body is the call
# plus the four append lines.
ac, pre, rec, f13 = [], [], [], []
for i in range(1,6):
op = svm_pip(X,y,[10*i,14*i,96*i],10)
ac.append(np.mean(op[1]))
pre.append(np.mean(op[2]))
rec.append(np.mean(op[3]))
f13.append(np.mean(op[4]))
# summary statistics over the 5 repetitions
print("Mean of test accuracy scores: " + str(np.mean(ac)))
print("Std of test accuracy scores: " + str(np.std(ac)))
print("Mean of test pre scores: " + str(np.mean(pre)))
print("Std of test pre scores: " + str(np.std(pre)))
print("Mean of test rec scores: " + str(np.mean(rec)))
print("Std of test rec scores: " + str(np.std(rec)))
print("Mean of test f13 scores: " + str(np.mean(f13)))
print("Std of test f13 scores: " + str(np.std(f13)))
# best (C, gamma) from the last repetition only
print(op[5])
from sklearn import svm
# Accumulate the column-normalized confusion matrix of the tuned SVM
# (fixed C, gamma from the grid search above) over 10 random splits.
rst = np.array([[0, 0],[0, 0]])
for i in range(1,11):
X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = 10*i)
# preprocessors fit on the training split only
scaler = StandardScaler()
oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
X_train_cont = X_other[cont_ftr]
X_train_cat = X_other[cat_ftr]
X_train_od = X_other[ord_ftr]
X_train_cont = scaler.fit_transform(X_train_cont)
X_train_cat = oh.fit_transform(X_train_cat)
X_train_od = od.fit_transform(X_train_od)
# feature layout: continuous | one-hot | binary | ordinal
X_train = np.concatenate((X_train_cont,X_train_cat,
np.array(X_other[bi_ftr]),X_train_od),axis=1)
X_t_cont = X_test[cont_ftr]
X_t_cat = X_test[cat_ftr]
X_t_od = X_test[ord_ftr]
X_t_cont = scaler.transform(X_t_cont)
X_t_cat = oh.transform(X_t_cat)
X_t_od = od.transform(X_t_od)
X_t = np.concatenate((X_t_cont,X_t_cat,
np.array(X_test[bi_ftr]),X_t_od),axis=1)
print(X_t.shape)
reg = svm.SVC(C=1567, gamma=.015632)
# NOTE(review): the model is pickled *before* .fit(), so svm.sav stores an
# unfitted estimator — the dump should come after the fit call below.
pickle.dump(reg, open("svm.sav", 'wb'))
reg.fit(X_train,y_other)
# sum(cm) is the per-column total, so each predicted-class column is normalized
rst = rst + np.array( confusion_matrix(y_test,reg.predict(X_t)) /sum(confusion_matrix(y_test,reg.predict(X_t))))
rst/10
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
def knn_pip(X, y, random_state, n_folds):
    """Tune a k-nearest-neighbours classifier over (n_neighbors, p).

    A 30% test set is split off first. In each stratified CV fold the
    preprocessors are fit on the training portion only, every
    (n_neighbors, p) pair in range(3, 15) is tried with distance
    weighting, and the fold's best model is scored on the test set.

    Parameters
    ----------
    X, y : feature DataFrame and label Series.
    random_state : sequence of ints; [0] seeds the split and [1] the CV
        splitter (index [2] is unused — KNN is deterministic).
    n_folds : number of stratified CV folds.

    Returns
    -------
    (CV_scores, test_scores, pre, rec, f13, best_par) where best_par is
    the (n_neighbors, p) of the *last* fold's best model.
    """
    X_other, X_test, y_other, y_test = train_test_split(
        X, y, test_size=0.3, random_state=random_state[0])
    kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state[1])
    CV_scores = []
    test_scores = []
    pre = []
    rec = []
    f13 = []
    for train_index, CV_index in kf.split(X_other, y_other):
        X_train, X_CV = X_other.iloc[train_index], X_other.iloc[CV_index]
        y_train, y_CV = y_other.iloc[train_index], y_other.iloc[CV_index]
        # Fit preprocessors on the training fold only (no leakage);
        # feature layout: continuous | one-hot | binary | ordinal.
        scaler = StandardScaler()
        oh = OneHotEncoder(sparse=False, handle_unknown="ignore")
        od = OrdinalEncoder(categories=[['N', 'mild', 'Moderate', 'Severe']])
        X_tr = np.concatenate((scaler.fit_transform(X_train[cont_ftr]),
                               oh.fit_transform(X_train[cat_ftr]),
                               np.array(X_train[bi_ftr]),
                               od.fit_transform(X_train[ord_ftr])), axis=1)
        X_c = np.concatenate((scaler.transform(X_CV[cont_ftr]),
                              oh.transform(X_CV[cat_ftr]),
                              np.array(X_CV[bi_ftr]),
                              od.transform(X_CV[ord_ftr])), axis=1)
        X_t = np.concatenate((scaler.transform(X_test[cont_ftr]),
                              oh.transform(X_test[cat_ftr]),
                              np.array(X_test[bi_ftr]),
                              od.transform(X_test[ord_ftr])), axis=1)
        CV_score = []
        regs = []
        par = range(3, 15)
        for i in par:            # number of neighbours
            for j in par:        # Minkowski distance power parameter
                # BUG FIX: the original passed the invalid keyword `l=i`;
                # the neighbour count is the `n_neighbors` parameter
                # (cf. the final-model cell, which uses n_neighbors).
                reg = KNeighborsClassifier(n_neighbors=i, weights='distance', p=j)
                reg.fit(X_tr, y_train)
                CV_score.append(accuracy_score(y_CV, reg.predict(X_c)))
                regs.append(reg)
        # BUG FIX: both coordinates of the best pair come from argmax —
        # the original mixed argmax (row) with argmin (column).
        best_idx = int(np.argmax(CV_score))
        best_par = (par[best_idx // len(par)], par[best_idx % len(par)])
        reg = regs[best_idx]
        CV_scores.append(np.max(CV_score))
        y_pred = reg.predict(X_t)
        test_scores.append(accuracy_score(y_test, y_pred))
        pre.append(precision_score(y_test, y_pred))
        rec.append(recall_score(y_test, y_pred))
        # beta by keyword: recent scikit-learn makes beta keyword-only
        f13.append(fbeta_score(y_test, y_pred, beta=1.3))
    return (CV_scores, test_scores, pre, rec, f13, best_par)
# Repeat the KNN pipeline (knn_pip) with 5 different seed triples and report
# mean/std of the held-out test metrics.
ac, pre, rec, f13 = [], [], [], []
for i in range(1,6):
op = knn_pip(X,y,[10*i,14*i,96*i],10)
ac.append(np.mean(op[1]))
pre.append(np.mean(op[2]))
rec.append(np.mean(op[3]))
f13.append(np.mean(op[4]))
# summary statistics over the 5 repetitions
print("Mean of test accuracy scores: " + str(np.mean(ac)))
print("Std of test accuracy scores: " + str(np.std(ac)))
print("Mean of test pre scores: " + str(np.mean(pre)))
print("Std of test pre scores: " + str(np.std(pre)))
print("Mean of test rec scores: " + str(np.mean(rec)))
print("Std of test rec scores: " + str(np.std(rec)))
print("Mean of test f13 scores: " + str(np.mean(f13)))
print("Std of test f13 scores: " + str(np.std(f13)))
# best (n_neighbors, p) from the last repetition only
print(op[5])
from sklearn.neighbors import KNeighborsClassifier
# Accumulate the column-normalized confusion matrix of the tuned KNN model
# over 5 random splits.
rst = np.array([[0, 0],[0, 0]])
for i in range(1,6):
X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = 14*i)
# preprocessors fit on the training split only
scaler = StandardScaler()
oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
X_train_cont = X_other[cont_ftr]
X_train_cat = X_other[cat_ftr]
X_train_od = X_other[ord_ftr]
X_train_cont = scaler.fit_transform(X_train_cont)
X_train_cat = oh.fit_transform(X_train_cat)
X_train_od = od.fit_transform(X_train_od)
# feature layout: continuous | one-hot | binary | ordinal
X_train = np.concatenate((X_train_cont,X_train_cat,
np.array(X_other[bi_ftr]),X_train_od),axis=1)
X_t_cont = X_test[cont_ftr]
X_t_cat = X_test[cat_ftr]
X_t_od = X_test[ord_ftr]
X_t_cont = scaler.transform(X_t_cont)
X_t_cat = oh.transform(X_t_cat)
X_t_od = od.transform(X_t_od)
X_t = np.concatenate((X_t_cont,X_t_cat,
np.array(X_test[bi_ftr]),X_t_od),axis=1)
reg = KNeighborsClassifier(n_neighbors=3, weights='distance',p=13)
# NOTE(review): the model is pickled *before* .fit(), so knn.sav stores an
# unfitted estimator — the dump should come after the fit call below.
pickle.dump(reg, open("knn.sav", 'wb'))
reg.fit(X_train,y_other)
# sum(cm) is the per-column total, so each predicted-class column is normalized
rst = rst + np.array(confusion_matrix(y_test,reg.predict(X_t))/sum(confusion_matrix(y_test,reg.predict(X_t))))
rst/5
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import precision_score, recall_score, fbeta_score
from sklearn.metrics import confusion_matrix
def ada_pip(X, y, random_state, n_folds):
    """Tune AdaBoost (tree depth x learning rate) with stratified CV.

    A 30% test set is split off first. In each stratified CV fold the
    preprocessors are fit on the training portion only, every pair of
    (max_depth in 1..14, learning_rate in logspace(1, -4, 25)) is tried
    with 100 estimators, and the fold's best model is scored on the
    held-out test set.

    Parameters
    ----------
    X, y : feature DataFrame and label Series.
    random_state : sequence of ints; [0] seeds the split, [1] the CV
        splitter, [2] the AdaBoost ensembles.
    n_folds : number of stratified CV folds.

    Returns
    -------
    (CV_scores, test_scores, pre, rec, f13, best_par) where best_par is
    the (max_depth, learning_rate) of the *last* fold's best model.
    """
    X_other, X_test, y_other, y_test = train_test_split(
        X, y, test_size=0.3, random_state=random_state[0])
    kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state[1])
    CV_scores = []
    test_scores = []
    pre = []
    rec = []
    f13 = []
    for train_index, CV_index in kf.split(X_other, y_other):
        X_train, X_CV = X_other.iloc[train_index], X_other.iloc[CV_index]
        y_train, y_CV = y_other.iloc[train_index], y_other.iloc[CV_index]
        # Fit preprocessors on the training fold only (no leakage);
        # feature layout: continuous | one-hot | binary | ordinal.
        scaler = StandardScaler()
        oh = OneHotEncoder(sparse=False, handle_unknown="ignore")
        od = OrdinalEncoder(categories=[['N', 'mild', 'Moderate', 'Severe']])
        X_tr = np.concatenate((scaler.fit_transform(X_train[cont_ftr]),
                               oh.fit_transform(X_train[cat_ftr]),
                               np.array(X_train[bi_ftr]),
                               od.fit_transform(X_train[ord_ftr])), axis=1)
        X_c = np.concatenate((scaler.transform(X_CV[cont_ftr]),
                              oh.transform(X_CV[cat_ftr]),
                              np.array(X_CV[bi_ftr]),
                              od.transform(X_CV[ord_ftr])), axis=1)
        X_t = np.concatenate((scaler.transform(X_test[cont_ftr]),
                              oh.transform(X_test[cat_ftr]),
                              np.array(X_test[bi_ftr]),
                              od.transform(X_test[ord_ftr])), axis=1)
        CV_score = []
        regs = []
        par1 = range(1, 15)                    # base-tree max_depth
        par2 = np.logspace(1, -4, num=25)      # learning_rate
        for i in par1:
            for j in par2:
                reg = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=i),
                                         learning_rate=j,
                                         n_estimators=100, random_state=random_state[2])
                reg.fit(X_tr, y_train)
                CV_score.append(accuracy_score(y_CV, reg.predict(X_c)))
                regs.append(reg)
        # BUG FIX: the flat grid index is i*len(par2)+j, so the row is
        # best_idx // len(par2) and the column best_idx % len(par2).
        # The original divided/modded by len(par1), which mislabels the
        # winning pair because len(par1) != len(par2).
        best_idx = int(np.argmax(CV_score))
        best_par = (par1[best_idx // len(par2)], par2[best_idx % len(par2)])
        reg = regs[best_idx]
        CV_scores.append(np.max(CV_score))
        y_pred = reg.predict(X_t)
        test_scores.append(accuracy_score(y_test, y_pred))
        pre.append(precision_score(y_test, y_pred))
        rec.append(recall_score(y_test, y_pred))
        # beta by keyword: recent scikit-learn makes beta keyword-only
        f13.append(fbeta_score(y_test, y_pred, beta=1.3))
    return (CV_scores, test_scores, pre, rec, f13, best_par)
# Repeat the AdaBoost pipeline (ada_pip) with 5 different seed triples and
# report mean/std of the held-out test metrics.
ac, pre, rec, f13 = [], [], [], []
for i in range(1,6):
op = ada_pip(X,y,[10*i,14*i,96*i],10)
ac.append(np.mean(op[1]))
pre.append(np.mean(op[2]))
rec.append(np.mean(op[3]))
f13.append(np.mean(op[4]))
# summary statistics over the 5 repetitions
print("Mean of test accuracy scores: " + str(np.mean(ac)))
print("Std of test accuracy scores: " + str(np.std(ac)))
print("Mean of test pre scores: " + str(np.mean(pre)))
print("Std of test pre scores: " + str(np.std(pre)))
print("Mean of test rec scores: " + str(np.mean(rec)))
print("Std of test rec scores: " + str(np.std(rec)))
print("Mean of test f13 scores: " + str(np.mean(f13)))
print("Std of test f13 scores: " + str(np.std(f13)))
# best (max_depth, learning_rate) from the last repetition only
print(op[5])
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
# Accumulate the column-normalized confusion matrix of the tuned AdaBoost
# model over 10 random splits.
rst = np.array([[0, 0],[0, 0]])
for i in range(1,11):
X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = 14*i)
# preprocessors fit on the training split only
scaler = StandardScaler()
oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
X_train_cont = X_other[cont_ftr]
X_train_cat = X_other[cat_ftr]
X_train_od = X_other[ord_ftr]
X_train_cont = scaler.fit_transform(X_train_cont)
X_train_cat = oh.fit_transform(X_train_cat)
X_train_od = od.fit_transform(X_train_od)
# feature layout: continuous | one-hot | binary | ordinal
X_train = np.concatenate((X_train_cont,X_train_cat,
np.array(X_other[bi_ftr]),X_train_od),axis=1)
X_t_cont = X_test[cont_ftr]
X_t_cat = X_test[cat_ftr]
X_t_od = X_test[ord_ftr]
X_t_cont = scaler.transform(X_t_cont)
X_t_cat = oh.transform(X_t_cat)
X_t_od = od.transform(X_t_od)
X_t = np.concatenate((X_t_cont,X_t_cat,
np.array(X_test[bi_ftr]),X_t_od),axis=1)
reg = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=6),learning_rate=0.1957,
n_estimators=100, random_state=16*i)
# NOTE(review): the model is pickled *before* .fit(), so ada.sav stores an
# unfitted estimator — the dump should come after the fit call below.
pickle.dump(reg, open("ada.sav", 'wb'))
reg.fit(X_train,y_other)
# sum(cm) is the per-column total, so each predicted-class column is normalized
rst = rst + np.array(confusion_matrix(y_test,reg.predict(X_t))/sum(confusion_matrix(y_test,reg.predict(X_t))))
rst/10
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, fbeta_score
from sklearn.ensemble import GradientBoostingClassifier
# Gradient-boosting analogue of the other *_pip functions: split off a 30%
# test set, then in each stratified CV fold fit the preprocessors on the
# training portion only, grid-search (n_estimators, learning_rate), and
# score the fold's best model on the held-out test set.
# random_state: [0] seeds the split, [1] the CV splitter, [2] the booster.
# Returns (CV_scores, test_scores, pre, rec, f13) — no best_par, since the
# index arithmetic below is commented out.
# NOTE(review): indentation was lost in the notebook-to-text extraction.
def xg_pip(X,y,random_state,n_folds):
X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = random_state[0])
kf = StratifiedKFold(n_splits=n_folds,shuffle=True,random_state=random_state[1])
CV_scores = []
test_scores = []
pre = []
rec = []
f13 = []
cf = []
for train_index, CV_index in kf.split(X_other,y_other):
X_train, X_CV = X_other.iloc[train_index], X_other.iloc[CV_index]
y_train, y_CV = y_other.iloc[train_index], y_other.iloc[CV_index]
# preprocessors fit on the training fold only (no leakage)
scaler = StandardScaler()
oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
X_train_cont = X_train[cont_ftr]
X_train_cat = X_train[cat_ftr]
X_train_od = X_train[ord_ftr]
X_train_cont = scaler.fit_transform(X_train_cont)
X_train_cat = oh.fit_transform(X_train_cat)
X_train_od = od.fit_transform(X_train_od)
# feature layout: continuous | one-hot | binary | ordinal
X_train = np.concatenate((X_train_cont,X_train_cat,
np.array(X_train[bi_ftr]),X_train_od),axis=1)
X_c_cont = X_CV[cont_ftr]
X_c_cat = X_CV[cat_ftr]
X_c_od = X_CV[ord_ftr]
X_c_cont = scaler.transform(X_c_cont)
X_c_cat = oh.transform(X_c_cat)
X_c_od = od.transform(X_c_od)
X_c = np.concatenate((X_c_cont,X_c_cat,
np.array(X_CV[bi_ftr]),X_c_od),axis=1)
X_t_cont = X_test[cont_ftr]
X_t_cat = X_test[cat_ftr]
X_t_od = X_test[ord_ftr]
X_t_cont = scaler.transform(X_t_cont)
X_t_cat = oh.transform(X_t_cat)
X_t_od = od.transform(X_t_od)
X_t = np.concatenate((X_t_cont,X_t_cat,
np.array(X_test[bi_ftr]),X_t_od),axis=1)
train_score = []
CV_score = []
regs = []
# grid: number of boosting stages x learning rate
par1 = range(10,201,10)
par2 = np.logspace(1,-4,num=25)
for i in par1:
for j in par2:
reg = GradientBoostingClassifier(n_estimators=i,learning_rate=j,
random_state=random_state[2])
reg.fit(X_train,y_train)
train_score.append(accuracy_score(y_train,reg.predict(X_train)))
CV_score.append(accuracy_score(y_CV,reg.predict(X_c)))
regs.append(reg)
# best_par = (par1[np.argmax(CV_score)//(len(par1))],
# par2[np.argmax(CV_score)%(len(par1))])
# pick the fold's best model by CV accuracy and score it on the test set
reg = regs[np.argmax(CV_score)]
CV_scores.append(np.max(CV_score))
test_scores.append(accuracy_score(y_test,reg.predict(X_t)))
pre.append(precision_score(y_test,reg.predict(X_t)))
rec.append(recall_score(y_test,reg.predict(X_t)))
# NOTE(review): positional beta (1.3) is keyword-only in recent
# scikit-learn — should be fbeta_score(..., beta=1.3); confirm version.
f13.append(fbeta_score(y_test,reg.predict(X_t),1.3))
# cf.append(confusion_matrix(y,reg.predict(X)))
return (CV_scores,test_scores,pre,rec,f13)
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
# Accumulate the column-normalized confusion matrix of the tuned gradient
# boosting model over 5 random splits.
rst = np.array([[0, 0],[0, 0]])
for i in range(1,6):
X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = 14*i)
# preprocessors fit on the training split only
scaler = StandardScaler()
oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
X_train_cont = X_other[cont_ftr]
X_train_cat = X_other[cat_ftr]
X_train_od = X_other[ord_ftr]
X_train_cont = scaler.fit_transform(X_train_cont)
X_train_cat = oh.fit_transform(X_train_cat)
X_train_od = od.fit_transform(X_train_od)
# feature layout: continuous | one-hot | binary | ordinal
X_train = np.concatenate((X_train_cont,X_train_cat,
np.array(X_other[bi_ftr]),X_train_od),axis=1)
X_t_cont = X_test[cont_ftr]
X_t_cat = X_test[cat_ftr]
X_t_od = X_test[ord_ftr]
X_t_cont = scaler.transform(X_t_cont)
X_t_cat = oh.transform(X_t_cat)
X_t_od = od.transform(X_t_od)
X_t = np.concatenate((X_t_cont,X_t_cat,
np.array(X_test[bi_ftr]),X_t_od),axis=1)
reg = GradientBoostingClassifier(n_estimators=160,learning_rate=0.06,
random_state=16*i)
# NOTE(review): the model is pickled *before* .fit(), so xg.sav stores an
# unfitted estimator — the dump should come after the fit call below.
pickle.dump(reg, open("xg.sav", 'wb'))
reg.fit(X_train,y_other)
# sum(cm) is the per-column total, so each predicted-class column is normalized
rst = rst + np.array(confusion_matrix(y_test,reg.predict(X_t))/sum(confusion_matrix(y_test,reg.predict(X_t))))
rst/5
# Repeat the gradient-boosting pipeline (xg_pip) with 5 different seed
# triples and report mean/std of the held-out test metrics.
ac, pre, rec, f13 = [], [], [], []
for i in range(1,6):
op = xg_pip(X,y,[10*i,14*i,96*i],10)
ac.append(np.mean(op[1]))
pre.append(np.mean(op[2]))
rec.append(np.mean(op[3]))
f13.append(np.mean(op[4]))
# summary statistics over the 5 repetitions
print("Mean of test accuracy scores: " + str(np.mean(ac)))
print("Std of test accuracy scores: " + str(np.std(ac)))
print("Mean of test pre scores: " + str(np.mean(pre)))
print("Std of test pre scores: " + str(np.std(pre)))
print("Mean of test rec scores: " + str(np.mean(rec)))
print("Std of test rec scores: " + str(np.std(rec)))
print("Mean of test f13 scores: " + str(np.mean(f13)))
print("Std of test f13 scores: " + str(np.std(f13)))
# xg_pip returns no best_par, hence the commented-out print
# print(op[5])
```
Mean of test accuracy scores: 0.8243956043956043
Std of test accuracy scores: 0.030129936376547903
Mean of test pre scores: 0.8838646360404766
Std of test pre scores: 0.018200243957284964
Mean of test rec scores: 0.8760111308990162
Std of test rec scores: 0.04114048583585612
Mean of test f13 scores: 0.8771853207368455
Std of test f13 scores: 0.02578465408647863
```
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, fbeta_score
# L1-regularized logistic regression in a proper sklearn Pipeline:
# ColumnTransformer (scaler for continuous+binary, one-hot for categorical)
# -> final StandardScaler -> LogisticRegression, tuned over C by GridSearchCV.
# Returns the fitted grid, the expanded feature names, and the test split.
# NOTE(review): indentation was lost in the notebook-to-text extraction.
def ML_pipeline_kfold_LR1(X,y,random_state,n_folds):
# create a test set
X_other, X_test, y_other, y_test = train_test_split(X, y, test_size=0.3, random_state = random_state)
# splitter for _other
kf = StratifiedKFold(n_splits=n_folds,shuffle=True,random_state=random_state)
# create the pipeline: preprocessor + supervised ML method
cat_ftrs = cat_ftr
# binary features are passed through the scaler together with continuous ones
cont_ftrs = cont_ftr + bi_ftr
# one-hot encoder
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(sparse=False,handle_unknown='ignore'))])
# standard scaler
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, cont_ftrs),
('cat', categorical_transformer, cat_ftrs)])
# second scaler so the one-hot columns are standardized too
final_scaler = StandardScaler()
pipe = make_pipeline(preprocessor,final_scaler,LogisticRegression(penalty='l1',solver='saga',max_iter=10**4))
# the parameter(s) we want to tune
param_grid = {'logisticregression__C':np.logspace(1,-3,num=50)}
# prepare gridsearch
grid = GridSearchCV(pipe, param_grid=param_grid,cv=kf, return_train_score = True,n_jobs=-1)
# do kfold CV on _other
grid.fit(X_other, y_other)
# recover the post-encoding column names (scaled block keeps its names,
# one-hot block expands); NOTE(review): get_feature_names was replaced by
# get_feature_names_out in newer scikit-learn — confirm installed version.
feature_names = cont_ftrs + \
list(grid.best_estimator_[0].named_transformers_['cat'][0].get_feature_names(cat_ftrs))
return grid, np.array(feature_names), X_test, y_test
# Fit the pipeline, report the test score, and plot the 10 largest-magnitude
# logistic-regression coefficients (horizontal bar chart saved to log_ftr.png).
grid, feature_names, X_test, y_test = ML_pipeline_kfold_LR1(X,y,42,5)
print('test score:',grid.score(X_test,y_test))
# last pipeline step is the LogisticRegression; coef_[0] holds its weights
coefs = grid.best_estimator_[-1].coef_[0]
# indices sorted by |coef|, ascending — the last 10 are the most influential
sorted_indcs = np.argsort(np.abs(coefs))
plt.rcParams.update({'font.size': 14})
plt.barh(np.arange(10),coefs[sorted_indcs[-10:]])
plt.yticks(np.arange(10),feature_names[sorted_indcs[-10:]])
plt.xlabel('coefficient')
# plt.title('not all scaled')
plt.tight_layout()
plt.savefig('log_ftr.png',dpi=300)
plt.show()
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score, fbeta_score
from sklearn.metrics import confusion_matrix
def logit_pip_ftr(X, y, random_state, n_folds):
    """L1 logistic regression with manual preprocessing and C search.

    Splits off a 30% test set, then per stratified CV fold: scales the
    continuous block, one-hot-encodes + standardizes the categorical
    block, ordinal-encodes + standardizes the severity column, sweeps C
    over logspace(1, -3, 50), and scores the fold's best model on the
    held-out test set.

    Parameters
    ----------
    X, y : feature DataFrame and label Series.
    random_state : sequence of ints; [0] seeds the split, [1] the CV splitter.
    n_folds : number of stratified CV folds.

    Returns
    -------
    (CV_scores, test_scores, pre, rec, f13, best_c) where best_c is the
    best C of the *last* fold.
    """
    X_other, X_test, y_other, y_test = train_test_split(
        X, y, test_size=0.3, random_state=random_state[0])
    kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state[1])
    CV_scores = []
    test_scores = []
    pre = []
    rec = []
    f13 = []
    for train_index, CV_index in kf.split(X_other, y_other):
        X_train, X_CV = X_other.iloc[train_index], X_other.iloc[CV_index]
        y_train, y_CV = y_other.iloc[train_index], y_other.iloc[CV_index]
        # One scaler per feature block, each fit on the training fold only.
        # BUG FIX: the original shared a single `scaler2` between the
        # one-hot and ordinal blocks (the second fit overwrote the first)
        # and called fit_transform on the CV/test data, leaking their
        # statistics into the preprocessing.
        scaler = StandardScaler()       # continuous features
        cat_scaler = StandardScaler()   # standardizes the one-hot columns
        od_scaler = StandardScaler()    # standardizes the ordinal column
        oh = OneHotEncoder(sparse=False, handle_unknown="ignore")
        od = OrdinalEncoder(categories=[['N', 'mild', 'Moderate', 'Severe']])
        X_tr = np.concatenate(
            (scaler.fit_transform(X_train[cont_ftr]),
             cat_scaler.fit_transform(oh.fit_transform(X_train[cat_ftr])),
             np.array(X_train[bi_ftr]),
             od_scaler.fit_transform(od.fit_transform(X_train[ord_ftr]))), axis=1)
        X_c = np.concatenate(
            (scaler.transform(X_CV[cont_ftr]),
             cat_scaler.transform(oh.transform(X_CV[cat_ftr])),
             np.array(X_CV[bi_ftr]),
             od_scaler.transform(od.transform(X_CV[ord_ftr]))), axis=1)
        X_t = np.concatenate(
            (scaler.transform(X_test[cont_ftr]),
             cat_scaler.transform(oh.transform(X_test[cat_ftr])),
             np.array(X_test[bi_ftr]),
             od_scaler.transform(od.transform(X_test[ord_ftr]))), axis=1)
        cs = np.logspace(1, -3, num=50)
        CV_score = []
        regs = []
        for c in cs:
            reg = LogisticRegression(penalty="l1", C=c, solver="saga", max_iter=10**4)
            reg.fit(X_tr, y_train)
            CV_score.append(accuracy_score(y_CV, reg.predict(X_c)))
            regs.append(reg)
        best_idx = int(np.argmax(CV_score))
        best_c = cs[best_idx]
        reg = regs[best_idx]
        CV_scores.append(np.max(CV_score))
        y_pred = reg.predict(X_t)
        test_scores.append(accuracy_score(y_test, y_pred))
        pre.append(precision_score(y_test, y_pred))
        rec.append(recall_score(y_test, y_pred))
        # beta by keyword: recent scikit-learn makes beta keyword-only
        f13.append(fbeta_score(y_test, y_pred, beta=1.3))
    return (CV_scores, test_scores, pre, rec, f13, best_c)
# Run the logistic-regression pipeline once, then refit a final model with
# the chosen C on the *entire* dataset (preprocessors fit on all of X).
logit_pip_ftr(X,y,[10,14],5)
scaler = StandardScaler()
scaler2 = StandardScaler()
oh = OneHotEncoder(sparse=False,handle_unknown = "ignore")
od = OrdinalEncoder(categories = [['N','mild','Moderate','Severe']])
X_train_cont = X[cont_ftr]
X_train_cat = X[cat_ftr]
X_train_od = X[ord_ftr]
X_train_cont = scaler.fit_transform(X_train_cont)
# NOTE(review): scaler2 is refit on the ordinal block right after the
# one-hot block, so its stored statistics only reflect the ordinal column.
X_train_cat = scaler2.fit_transform(oh.fit_transform(X_train_cat))
X_train_od = scaler2.fit_transform(od.fit_transform(X_train_od))
# feature layout: continuous | one-hot | binary | ordinal
X_train = np.concatenate((X_train_cont,X_train_cat,
np.array(X[bi_ftr]),X_train_od),axis=1)
# C fixed to the value selected by the CV sweep above
reg = LogisticRegression(penalty="l1",C=0.33932217718953295,solver="saga",max_iter=10**4)
reg.fit(X_train,y)
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Time series forecasting
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/beta/tutorials/text/time_series"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/text/time_series.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/text/time_series.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/text/time_series.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial is an introduction to time series forecasting using Recurrent Neural Networks (RNNs). This is covered in two parts: first, you will forecast a univariate time series, then you will forecast a multivariate time series.
```
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
```
## The weather dataset
This tutorial uses a [weather time series dataset](https://www.bgc-jena.mpg.de/wetter/) recorded by the [Max-Planck-Institute for Biogeochemistry](https://www.bgc-jena.mpg.de/index.php/Main/HomePage).
This dataset contains 14 different features such as air temperature, atmospheric pressure, and humidity. These were collected every 10 minutes, beginning in 2003. For efficiency, you will use only the data collected between 2009 and 2016. This section of the dataset was prepared by [François Chollet](https://twitter.com/fchollet).
```
# Download (and cache) the Jena climate dataset zip via Keras' downloader.
zip_path = tf.keras.utils.get_file(
origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
fname='jena_climate_2009_2016.csv.zip',
extract=True)
# get_file returns the .zip path; dropping the extension gives the CSV path
csv_path, _ = os.path.splitext(zip_path)
df = pd.read_csv(csv_path)
```
Let's take a glance at the data.
```
df.head()
```
As you can see above, an observation is recorded every 10 minutes. This means that, for a single hour, you will have 6 observations. Similarly, a single day will contain 144 (6x24) observations.
Given a specific time, let's say you want to predict the temperature 6 hours in the future. In order to make this prediction, you choose to use 5 days of observations. Thus, you would create a window containing the last 720(5x144) observations to train the model. Many such configurations are possible, making this dataset a good one to experiment with.
The function below returns the above described windows of time for the model to train on. The parameter `history_size` is the size of the past window of information. The `target_size` is how far in the future does the model need to learn to predict. The `target_size` is the label that needs to be predicted.
```
def univariate_data(dataset, start_index, end_index, history_size, target_size):
    """Build (window, label) training pairs from a univariate series.

    Each sample holds the `history_size` observations preceding index i,
    reshaped to (history_size, 1); its label is the value `target_size`
    steps ahead of i. Windows are produced for i in
    [start_index + history_size, end_index); passing end_index=None means
    "up to the last index that still has a label".

    Returns a pair of numpy arrays (data, labels).
    """
    lo = start_index + history_size
    hi = len(dataset) - target_size if end_index is None else end_index
    windows = [np.reshape(dataset[i - history_size:i], (history_size, 1))
               for i in range(lo, hi)]
    targets = [dataset[i + target_size] for i in range(lo, hi)]
    return np.array(windows), np.array(targets)
```
In both the following tutorials, the first 300,000 rows of the data will be the training dataset, and the remaining rows will be the validation dataset. This amounts to ~2100 days worth of training data.
```
TRAIN_SPLIT = 300000
```
Setting seed to ensure reproducibility.
```
tf.random.set_seed(13)
```
## Part 1: Forecast a univariate time series
First, you will train a model using only a single feature (temperature), and use it to make predictions for that value in the future.
Let's first extract only the temperature from the dataset.
```
uni_data = df['T (degC)']
uni_data.index = df['Date Time']
uni_data.head()
```
Let's observe how this data looks across time.
```
uni_data.plot(subplots=True)
uni_data = uni_data.values
```
It is important to normalize features before training a neural network. A common way to do so is by subtracting the mean and dividing by the standard deviation of each feature.
Note: The mean and standard deviation should only be computed using the training data.
```
uni_train_mean = uni_data[:TRAIN_SPLIT].mean()
uni_train_std = uni_data[:TRAIN_SPLIT].std()
```
Let's normalize the data.
```
uni_data = (uni_data-uni_train_mean)/uni_train_std
```
Let's now create the data for the univariate model. For part 1, the model will be given the last 20 recorded temperature observations, and needs to learn to predict the temperature at the next time step.
```
# Window the normalized series: 20 past observations predict the value at
# the current step (future offset 0); split at TRAIN_SPLIT.
univariate_past_history = 20
univariate_future_target = 0
x_train_uni, y_train_uni = univariate_data(uni_data, 0, TRAIN_SPLIT,
univariate_past_history,
univariate_future_target)
x_val_uni, y_val_uni = univariate_data(uni_data, TRAIN_SPLIT, None,
univariate_past_history,
univariate_future_target)
```
This is what the `univariate_data` function returns.
```
print ('Single window of past history')
print (x_train_uni[0])
print ('\n Target temperature to predict')
print (y_train_uni[0])
```
Now that the data has been created, let's take a look at a single example. The information given to the network is given in blue, and it must predict the value at the red cross.
```
def create_time_steps(length):
    """Return the negative time offsets [-length, ..., -1] used as the x-axis for history plots."""
    return list(range(-length, 0))
def show_plot(plot_data, delta, title):
    """Plot a history window plus optional true/predicted future points.

    ``plot_data[0]`` is the history series; optional further entries are the
    true future value and the model prediction, both drawn at x-offset
    ``delta`` (0 when ``delta`` is falsy). Returns the ``plt`` module so the
    caller can chain ``.show()``.
    """
    series_labels = ['History', 'True Future', 'Model Prediction']
    series_styles = ['.-', 'rx', 'go']
    history_steps = create_time_steps(plot_data[0].shape[0])
    future = delta if delta else 0

    plt.title(title)
    # The first entry is the full history curve; later entries are single
    # future points drawn as large markers.
    plt.plot(history_steps, plot_data[0].flatten(), series_styles[0],
             label=series_labels[0])
    for idx in range(1, len(plot_data)):
        plt.plot(future, plot_data[idx], series_styles[idx], markersize=10,
                 label=series_labels[idx])
    plt.legend()
    plt.xlim([history_steps[0], (future + 5) * 2])
    plt.xlabel('Time-Step')
    return plt
show_plot([x_train_uni[0], y_train_uni[0]], 0, 'Sample Example')
```
### Baseline
Before proceeding to train a model, let's first set a simple baseline. Given an input point, the baseline method looks at all the history and predicts the next point to be the average of the last 20 observations.
```
def baseline(history):
    """Naive forecast: predict the mean of all observations in the history window."""
    return np.asarray(history).mean()
show_plot([x_train_uni[0], y_train_uni[0], baseline(x_train_uni[0])], 0,
'Baseline Prediction Example')
```
Let's see if you can beat this baseline using a recurrent neural network.
### Recurrent neural network
A Recurrent Neural Network (RNN) is a type of neural network well-suited to time series data. RNNs process a time series step-by-step, maintaining an internal state summarizing the information they've seen so far. For more details, read the [RNN tutorial](https://www.tensorflow.org/tutorials/sequences/recurrent). In this tutorial, you will use a specialized RNN layer called Long Short-Term Memory ([LSTM](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM)).
Let's now use `tf.data` to shuffle, batch, and cache the dataset.
```
# Mini-batch size fed to the model per optimizer step.
BATCH_SIZE = 256
# Shuffle buffer: each example is drawn at random from a pool of this many.
BUFFER_SIZE = 10000

# Training pipeline: cache in memory, shuffle, batch, and repeat forever
# (each epoch is bounded later via steps_per_epoch in .fit()).
train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))
train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()

# Validation pipeline: no shuffling needed, just batch and repeat.
val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))
val_univariate = val_univariate.batch(BATCH_SIZE).repeat()
```
The following visualisation should help you understand how the data is represented after batching.

You will see the LSTM requires the input shape of the data it is being given. This shape can be inferred from dataset created.
```
# A single 8-unit LSTM followed by a 1-unit dense head emitting the scalar
# temperature forecast. input_shape = (window_length, num_features), taken
# from the last two axes of the training windows.
simple_lstm_model = tf.keras.models.Sequential([
    tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]),
    tf.keras.layers.Dense(1)
])
# Mean-absolute-error loss on the normalized temperature.
simple_lstm_model.compile(optimizer='adam', loss='mae')
```
Let's make a sample prediction, to check the output of the model.
```
for x, y in val_univariate.take(1):
print(simple_lstm_model.predict(x).shape)
```
Let's train the model now. Due to the large size of the dataset, in the interest of saving time, each epoch will only run for 200 steps, instead of the complete training data as normally done.
```
EVALUATION_INTERVAL = 200
EPOCHS = 10
simple_lstm_model.fit(train_univariate, epochs=EPOCHS,
steps_per_epoch=EVALUATION_INTERVAL,
validation_data=val_univariate, validation_steps=50)
```
#### Predict using the simple LSTM model
Now that you have trained your simple LSTM, let's try and make a few predictions.
```
for x, y in val_univariate.take(3):
plot = show_plot([x[0].numpy(), y[0].numpy(),
simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model')
plot.show()
```
This looks better than the baseline. Now that you have seen the basics, let's move on to part two, where you will work with a multivariate time series.
## Part 2: Forecast a multivariate time series
The original dataset contains fourteen features. For simplicity, this section considers only three of the original fourteen. The features used are air temperature, atmospheric pressure, and air density.
To use more features, add their names to this list.
```
features_considered = ['p (mbar)', 'T (degC)', 'rho (g/m**3)']
features = df[features_considered]
features.index = df['Date Time']
features.head()
```
Let's have a look at how each of these features vary across time.
```
features.plot(subplots=True)
```
As mentioned, the first step will be to normalize the dataset using the mean and standard deviation of the training data.
```
dataset = features.values
data_mean = dataset.mean(axis=0)
data_std = dataset.std(axis=0)
dataset = (dataset-data_mean)/data_std
```
### Single step model
In a single step setup, the model learns to predict a single point in the future based on some history provided.
The below function performs the same windowing task as above; however, here it samples the past observations based on the step size given.
```
def multivariate_data(dataset, target, start_index, end_index, history_size,
                      target_size, step, single_step=False):
    """Slice (history window, future target) pairs out of a time series.

    Each window covers ``history_size`` consecutive rows of ``dataset``,
    subsampled every ``step`` rows. The label is either the single target
    value ``target_size`` steps after the window (``single_step=True``) or
    the slice of the next ``target_size`` target values. ``end_index=None``
    means "up to the last index that still has a full label".
    """
    if end_index is None:
        end_index = len(dataset) - target_size
    first_label_idx = start_index + history_size

    windows = []
    labels = []
    for i in range(first_label_idx, end_index):
        # Indices of the (subsampled) observations feeding this window.
        windows.append(dataset[range(i - history_size, i, step)])
        if single_step:
            labels.append(target[i + target_size])
        else:
            labels.append(target[i:i + target_size])
    return np.array(windows), np.array(labels)
```
In this tutorial, the network is shown data from the last five (5) days, i.e. 720 observations sampled every 10 minutes. For the network input, the observations are subsampled every hour, since a drastic change is not expected within 60 minutes; thus, 120 observations represent the history of the last five days. For the single step prediction model, the label for a datapoint is the temperature 12 hours into the future. In order to create a label for this, the temperature after 72 (12*6) observations is used.
```
past_history = 720
future_target = 72
STEP = 6
x_train_single, y_train_single = multivariate_data(dataset, dataset[:, 1], 0,
TRAIN_SPLIT, past_history,
future_target, STEP,
single_step=True)
x_val_single, y_val_single = multivariate_data(dataset, dataset[:, 1],
TRAIN_SPLIT, None, past_history,
future_target, STEP,
single_step=True)
```
Let's look at a single data-point.
```
print ('Single window of past history : {}'.format(x_train_single[0].shape))
train_data_single = tf.data.Dataset.from_tensor_slices((x_train_single, y_train_single))
train_data_single = train_data_single.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
val_data_single = tf.data.Dataset.from_tensor_slices((x_val_single, y_val_single))
val_data_single = val_data_single.batch(BATCH_SIZE).repeat()
single_step_model = tf.keras.models.Sequential()
single_step_model.add(tf.keras.layers.LSTM(32,
input_shape=x_train_single.shape[-2:]))
single_step_model.add(tf.keras.layers.Dense(1))
single_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae')
```
Let's check out a sample prediction.
```
for x, y in val_data_single.take(1):
print(single_step_model.predict(x).shape)
single_step_history = single_step_model.fit(train_data_single, epochs=EPOCHS,
steps_per_epoch=EVALUATION_INTERVAL,
validation_data=val_data_single,
validation_steps=50)
def plot_train_history(history, title):
    """Plot training vs. validation loss curves from a Keras History object."""
    train_loss = history.history['loss']
    valid_loss = history.history['val_loss']
    epoch_axis = range(len(train_loss))

    plt.figure()
    plt.plot(epoch_axis, train_loss, 'b', label='Training loss')
    plt.plot(epoch_axis, valid_loss, 'r', label='Validation loss')
    plt.title(title)
    plt.legend()
    plt.show()
plot_train_history(single_step_history,
'Single Step Training and validation loss')
```
#### Predict a single step future
Now that the model is trained, let's make a few sample predictions. The model is given the history of three features over the past five days sampled every hour (120 data-points), since the goal is to predict the temperature, the plot only displays the past temperature. The prediction is made one day into the future (hence the gap between the history and prediction).
```
for x, y in val_data_single.take(3):
plot = show_plot([x[0][:, 1].numpy(), y[0].numpy(),
single_step_model.predict(x)[0]], 12,
'Single Step Prediction')
plot.show()
```
### Multi-Step model
In a multi-step prediction model, given a past history, the model needs to learn to predict a range of future values. Thus, unlike a single step model, where only a single future point is predicted, a multi-step model predicts a sequence of future values.
For the multi-step model, the training data again consists of recordings over the past five days sampled every hour. However, here, the model needs to learn to predict the temperature for the next 12 hours. Since an observation is taken every 10 minutes, the output is 72 predictions. For this task, the dataset needs to be prepared accordingly, thus the first step is just to create it again, but with a different target window.
```
future_target = 72
x_train_multi, y_train_multi = multivariate_data(dataset, dataset[:, 1], 0,
TRAIN_SPLIT, past_history,
future_target, STEP)
x_val_multi, y_val_multi = multivariate_data(dataset, dataset[:, 1],
TRAIN_SPLIT, None, past_history,
future_target, STEP)
```
Let's check out a sample data-point.
```
print ('Single window of past history : {}'.format(x_train_multi[0].shape))
print ('\n Target temperature to predict : {}'.format(y_train_multi[0].shape))
train_data_multi = tf.data.Dataset.from_tensor_slices((x_train_multi, y_train_multi))
train_data_multi = train_data_multi.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
val_data_multi = val_data_multi.batch(BATCH_SIZE).repeat()
```
Plotting a sample data-point.
```
def multi_step_plot(history, true_future, prediction):
    """Plot the past temperature plus the true (and optionally predicted) future.

    Only column 1 of ``history`` (temperature) is drawn. ``prediction`` is
    skipped when it is all zeros (e.g. the pre-training placeholder).
    """
    plt.figure(figsize=(12, 6))
    past_steps = create_time_steps(len(history))
    # Divide by STEP to express the future axis in the same hourly units
    # as the (subsampled) history axis.
    future_steps = np.arange(len(true_future)) / STEP

    plt.plot(past_steps, np.array(history[:, 1]), label='History')
    plt.plot(future_steps, np.array(true_future), 'bo', label='True Future')
    if prediction.any():
        plt.plot(future_steps, np.array(prediction), 'ro',
                 label='Predicted Future')
    plt.legend(loc='upper left')
    plt.show()
```
In this plot and subsequent similar plots, the history and the future data are sampled every hour.
```
for x, y in train_data_multi.take(1):
multi_step_plot(x[0], y[0], np.array([0]))
```
Since the task here is a bit more complicated than the previous task, the model now consists of two LSTM layers. Finally, since 72 predictions are made, the dense layer outputs 72 predictions.
```
# Stacked LSTMs: the first returns the full sequence so the second can
# consume it; the dense head emits all 72 future steps in one shot.
multi_step_model = tf.keras.models.Sequential()
multi_step_model.add(tf.keras.layers.LSTM(32,
                                          return_sequences=True,
                                          input_shape=x_train_multi.shape[-2:]))
multi_step_model.add(tf.keras.layers.LSTM(16, activation='relu'))
multi_step_model.add(tf.keras.layers.Dense(72))
# clipvalue=1.0 clips each gradient element to [-1, 1] to stabilise training.
multi_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(clipvalue=1.0), loss='mae')
```
Let's see how the model predicts before it trains.
```
for x, y in val_data_multi.take(1):
print (multi_step_model.predict(x).shape)
multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
steps_per_epoch=EVALUATION_INTERVAL,
validation_data=val_data_multi,
validation_steps=50)
plot_train_history(multi_step_history, 'Multi-Step Training and validation loss')
```
#### Predict a multi-step future
Let's now have a look at how well your network has learnt to predict the future.
```
for x, y in val_data_multi.take(3):
multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
```
## Next steps
This tutorial was a quick introduction to time series forecasting using an RNN. You may now try to predict the stock market and become a billionaire.
In addition, you may also write a generator to yield data (instead of the uni/multivariate_data function), which would be more memory efficient. You may also check out this [time series windowing](https://www.tensorflow.org/beta/guide/data#time_series_windowing) guide and use it in this tutorial.
For further understanding, you may read Chapter 15 of [Hands-on Machine Learning with Scikit-Learn, Keras, and TensorFlow](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/), 2nd Edition and Chapter 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
| github_jupyter |
##**Gender-speech-duration-calculator**
Here you will find a tool to calculate the percentage of time of female voice speech and male voice speech in a video/movie. You can either choose to paste a youtube link or you can upload your video. Install the program, choose one option and calculate the percentage of female and male speech.
##**Step 1: installation**
```
#@title Install gender-speech-duration-calculator
!apt-get install ffmpeg
!pip install inaSpeechSegmenter
!pip install youtube_dl
!pip install -qU ddsp
from google.colab import output
output.clear()
```
##**Step 2: upload/paste video**
```
#@title Option 1: paste your youtube link here
import youtube_dl

youtube_link= 'https://www.youtube.com/watch?v=UG_X_7g63rY&t=23s' #@param {type:"string"}

# Download the best audio stream and let FFmpeg transcode it to mp3.
# NOTE: the output template field must be %(ext)s — the original '%(etx)s'
# is an unknown template key, so youtube-dl could not produce the
# 'audio.mp3' file that the segmentation step below expects.
ydl_opts = {
    'format': 'bestaudio/best',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '192',
    }],
    'outtmpl': 'audio.%(ext)s',
    'quiet': False
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download([youtube_link])
output.clear()
output.clear()
#@title Option 2: click here to upload your video (it has to be mp4 format).
import moviepy.editor as mp
from ddsp.colab.colab_utils import upload
filenames, audios = upload()
tuple_file=filenames, audios[0][0]
video=tuple_file[0][0]
clip = mp.VideoFileClip("/content/"+video).subclip(0,20)
clip.audio.write_audiofile("audio.mp3")
```
##**Step 3: compute percentage**
```
#@title Click here to calculate the percentage of female/male voice speech (this may take a while).
from inaSpeechSegmenter import Segmenter, seg2csv

# Segment the extracted audio into labelled spans. Each segment is a
# (label, start_seconds, end_seconds) tuple; labels include 'female',
# 'male', and non-speech categories that are ignored below.
media = 'audio.mp3'
seg = Segmenter()
segmentation = seg(media)

# Accumulate total duration (in seconds) attributed to each voice label.
female=0
male=0
for i in segmentation:
    duration = i[2] - i[1]
    if i[0] == "female":
        female=female + duration
    elif i[0] == "male":
        male=male + duration
# Denominator for the percentages: speech time only, non-speech excluded.
total_speech= female + male
def percentage(part, whole):
    """Return ``part`` expressed as a percentage (0-100) of ``whole``."""
    ratio = float(part) / float(whole)
    return 100 * ratio
percentage_female=round(percentage(female, total_speech), 2)
percentage_male=round(percentage(male, total_speech), 2)
output.clear()
print("RESULTS:")
print("Female voice speech: "+str(percentage_female)+ "%.")
print("Male voice speech: "+str(percentage_male)+"%.")
```
##Important note:
Although we acknowledge there are many more genders than male/female, and that there is no such thing as a "woman's voice" or a "man's voice", this tool tries its best to make a contribution to make gender inequality more visible. Hopefully in the future there will be more developed tools that fight gender inequality for all genders.
| github_jupyter |
# SciPy
(documentação oficial: [docs.scipy.org](docs.scipy.org))
<img src="img/scipy.png" alt="ícone do pacote scipy - uma cobra branca desenhada num círculo azul" width=350>
O pacote **SciPy** é uma coleção de algoritmos e funções matemáticas construídos sobre o pacote **NumPy** do Python. Ele acrescenta poder significativo à sessão interativa do Python, fornecendo ao usuário comandos e classes de alto nível **para manipular e visualizar dados**. Com o **SciPy**, uma sessão Python interativa torna-se um ambiente de processamento de dados e prototipagem de sistemas, que rivaliza com os melhores softwares científicos: MATLAB, IDL, Octave, R-Lab e SciLab.
As aplicações científicas que usam o **SciPy** se beneficiam do desenvolvimento de módulos adicionais em inúmeras áreas da ciência, realizados por desenvolvedores em todo o mundo.
Por questões de conveniência e brevidade, já vamos importar os principais pacotes científicos, **Numpy**, **SciPy** e **Matplotlib**, com os seguintes comandos:
```
import numpy as np
# The plotting functions used throughout this notebook (plt.plot, plt.imshow,
# plt.figure, ...) live in matplotlib.pyplot, not in the bare matplotlib
# package — 'import matplotlib as plt' would make every plt.* call fail.
import matplotlib.pyplot as plt
import scipy as sc
```
## Subpacotes do Scipy
| Subpacote | Descrição |
|:------------------ |:--------------------------------------------------------------------- |
| cluster | Algoritmos de agrupamento |
| constants | Constantes físicas e matemáticas |
| **fft** | Rotinas de transformada rápida de Fourier |
| integrate | Integração e solucionadores de equação diferencial ordinária |
| interpolate | Interpolação e suavização Splines |
| io | Entrada e saída |
| **linalg** | Álgebra Linear |
| **misc** | Funções diversas: factorial, comb, lena, ... |
| ndimage | Processamento de imagem N-dimensional |
| odr | Regressão de distância ortogonal |
| optimize | Rotinas de optimização e localização de raízes |
| **signal** | Processamento de sinais |
| sparse | Rotinas de matrizes esparsas e associadas |
| spatial | Algoritmos de estruturas de dados espaciais |
| special | Funções especiais |
| stats | Funções e distribuições estatísticas |
```
import scipy.constants as scc
print(dir(scc))
print(scc.mph, scc.nano, scc.mach, scc.pound)
```
Os subpacotes do **Scipy** precisam ser importados separadamente, por exemplo:
```python
>>> from scipy import linalg, optimize
```
Para usar qualquer função de algum módulo (subpacote) do Scipy, use o seguinte esquema:
```python
>>> from scipy import algum_modulo
>>> algum_modulo.alguma_funcao()
```
**Exemplo**: Geração da Matriz de Pascal
$$Pascal(i,j)=\frac{(i+j-2)!}{(i-1)!(j-1)!}$$
```
from scipy import linalg
linalg.pascal(4)
```
<hr>
**Exemplo**: _Solução de Sistemas de Equações Lineares_ (coleção de duas ou mais equações lineares com o mesmo número de incógnitas). Para resolver um sistema de equações lineares, temos que encontrar os valores das incógnitas. É importante mencionar que neste tipo de **Álgebra Linear** nenhuma das incógnitas tem potência maior que um (daí o "Linear"). Por exemplo, um sistema de equações lineares com duas equações lineares e duas incógnitas tem a seguinte aparência:
6a + 3b = 27 # Equação 1
9a + 4b = 38 # Equação 2
Existem várias maneiras de resolver um sistema de equações lineares. Alguns dos métodos mais comumente usados são: a solução matricial, técnica de redução de linha e regra de Cramer. Vamos usar a solução matricial para resolver um sistema de equações lineares.
Ao aplicar a solução de matriz, a equação linear é representada na forma de matrizes:
A.X = B
Aqui, **A** é a matriz dos coeficientes à esquerda do sinal de igualdade, **X** é a matriz de incógnitas **a** e **b** e **B** é a matriz que contém as saídas, como 27 e 38 no caso do sistema anterior. As matrizes **A**, **X** e **B** terão a seguinte aparência:
A = [6 3]
[9 4]
X = [a]
[b]
B = [27]
[38]
Para encontrar o valor da matriz **X**, ou seja, as variáveis desconhecidas, usamos o inverso da matriz **A** multiplicada pela matriz **B**, conforme mostrado na seguinte equação:
X = A^(-1) · B
```
import scipy.linalg as sla

# Solve the 2x2 linear system A.X = B via the explicit inverse: X = A^(-1).B
A = np.array([[6, 3], [9, 4]])
inv_A = sla.inv(A)
print(inv_A)
B = np.array([27, 38])
X = inv_A.dot(B) # matrix multiplication: yields the solution vector
Y = inv_A * B # element-wise multiplication: NOT the solution (shown for contrast)
print("Solução: a = %.1f, b = %.1f" % (X[0],X[1]))
print(A,"\n",B)
print(X,"\n",Y)
```
A saída mostra que os valores das variáveis desconhecidas (incógnitas) **a** e **b** do sistema de equações anterior são 2 e 5, respectivamente.
**Exercício**: Resolva o sistema de três equações e três incógnitas.
4a + 2b + 5c = 53
5a + 3b + 7c = 74
9a + 2b + 6c = 73
(Gabarito: [3. 8. 5.])
Além de usar a função `linalg.inv()` e fazer o produto escalar com a matriz de saída, pode-se chamar diretamente o método `linalg.solve()` e passar as matrizes **A** e **B** para obter a solução do sistema de equações. Esta é uma segunda maneira, possivelmente mais fácil de lembrar, de resolver um sistema de equações lineares com o pacote **NumPy**.
```
A = np.array([[4, 2, 5], [5, 3, 7], [9, 2, 6]])
B = np.array([53, 74, 73])
X = np.linalg.solve(A,B)
print("Solução: [a, b, c] =",X)
```
<hr>
O **Scipy** também contém algumas funções do pacote **Numpy** e do submódulo <b><tt>numpy.lib.scimath</tt></b>. No entanto, é melhor usá-las diretamente do pacote **Numpy**.
O subpacote **misc** do **Scipy** tem funções diversas, inclusive a função `face()` que retorna a imagem de um **guaxinim**, a qual pode ser usada no processamento digital de imagens.
```
import scipy.misc as sm # até a versão 0.17 do Scipy (removida por questões de licença incompatível)
import pylab as pl
guax = sm.face(True)
pl.imshow(guax)
print(sm.face().shape, sm.face().dtype)
print(sm.face().shape[0]*sm.face().shape[1]*sm.face().shape[2])
print(guax.shape, guax.dtype)
print(guax.shape[0]*guax.shape[1])
```
Se vc quiser usar a imagem da famosa **Lena Söderberg**, até a versão anterior a 0.17 do pacote **Scipy** havia disponível uma função no subpacote **misc** chamada `lena()`, que carregava a imagem do rosto/ombro da Lena..., mas por motivos de incompatibilidade de licenças a função teve que ser removida. Mas pode-se salvar a imagem num diretório e carregá-la usando:
```
img = pl.imread('dat/lena.dat')
pl.imshow(img)
# pl.show()
print(img.shape, img.dtype)
```
Algumas outras funções úteis também podem ser encontradas no subpacote **scipy.special**.
Por exemplo: **`factorial()`** e **`comb()`** que calculam o fatorial de `n`, `n!`, e a quantidade de combinações (não considera a ordem dos elementos) $C_{(n,k)}=\left(_k^n\right) = \frac{n!}{k!(n-k)!}$, usando aritmética inteira exata (graças ao objeto inteiro `Long`) ou usando a precisão de ponto flutuante e a função Gama.
<img src='img/comb.png' width=900>
#### Exemplo-1
Seja o conjunto $Z = \{A, B, C, D\}$. Quantos grupos podem ser formados agrupando os elementos de $Z$, 2 a 2, sem levar em consideração a ordem dos elementos (os grupos não podem ter os mesmos elementos).
$$C_{4,2}=\binom{4}{2}=\frac{4!}{2!(4-2)!}=\frac{4.3.2!}{2!2!}=\frac{12}{2}=6\;\rm{grupos}$$
Quantos grupos podem ser formados agrupando os elementos de $Z$, 2 a 2, considerando a ordem dos elementos (os grupos não podem ter elementos repetidos).
$$A_{4,2}=\binom{4}{2}=\frac{4!}{(4-2)!}=\frac{4.3.2!}{2!}=\frac{24}{2}=12\;\rm{grupos}$$
```
from scipy.special import comb, factorial
def arranjo(n, k):
    """Number of ordered arrangements A(n,k): choose k of n elements, then order them (C(n,k) * k!)."""
    return factorial(k) * comb(n, k)
# print("C(4,2) =", comb(4,2).astype(int), " 4! =", factorial(4).astype(int))
print("C(4,2) =%2d 4! =%3d" % (comb(4,2),factorial(4)))
print("A(4,2) =%3d" % (arranjo(4,2)))
```
#### Exemplo-2
Quantas apostas de seis números da MegaSena podem ser preenchidas (acerto das seis dezenas)?
$$C_{60,6}=\binom{60}{6}=\frac{60!}{6!(60-6)!}=\frac{60.59.58.57.56.55.54!}{6!54!}=50.063.860\;\rm{apostas}$$
```
from scipy.special import comb, factorial
print("C(60,6) =",comb(60,6).astype(int), " 6! =",factorial(6).astype(int))
print("C(60,6) =%9d 6! =%4d" % (comb(60,6),factorial(6)))
```
<hr>
### Transformada de Fourier - Subpacote _fftpack_ do Scipy
A análise de Fourier é um método usado na decomposição de um sinal periódico numa soma de componentes senoidais, ou na recuperação do sinal a partir dessas componentes. Quando o sinal e sua **Transformada de Fourier** são substituídos por suas versões discretizadas, ela passa a ser chamada de **Transformada Discreta de Fourier (DFT)**.
A DFT se tornou um dos pilares da computação numérica, em parte devido a um algoritmo muito rápido de cálculo, chamado _Fast Fourier Transform (FFT)_, que já era conhecido por Gauss (1805) e foi trazido à luz, em sua forma atual, por Cooley e Tukey (1965). Para quem a velocidade das rotinas FFT é crítica deve-se considerar a instalação do pacote PyFFTW.
A DFT `X[k]` de comprimento `N`, da sequência `x[n]`, também de comprimento `N`, é definida como:
$$X[k]=\sum_{n=0}^{N-1}x[n].e^{-j\,k.n.2\pi/N}$$
E a DFT inversa é definida como:
$$x[n]=\sum_{k=0}^{N-1}X[k].e^{j\,k.n.2\pi/N}$$
Essas transformações podem ser calculadas por meio das funções `fft()` e `ifft()`, respectivamente.
```
from scipy.fftpack import fft, ifft, fftshift
x = np.array([1.0, 2.0, 1.0, -1.0, 1.5]) # sequência no domínio original
X = fft(x) # transformada Discreta de Fourier
X # domínio da frequência (valores complexos)
xinv = ifft(X)
#print(Xinv.astype(float))
print(xinv)
print(xinv.real)
print(np.real_if_close(xinv))
```
O exemplo seguinte exibe a representação da magnitude da DFT da soma de duas senoides, com frequências de 50 e 80 Hz, respectivamente:
<img src='img/espectro.png' width=400>
```
import pylab as pl
t = np.arange(0,2,0.001)
# x = np.zeros(t.shape) #
x = 1.4*np.sin(2*np.pi*50*t)
y = np.sin(2*np.pi*80*t)
z = x + y
pl.figure(figsize=(18,5))
pl.subplot(1,2,1); pl.plot(t[:100],z[:100]); pl.xlabel('tempo (s)'); pl.title('Soma de Senóides');
Z = fft(z)/len(z)
w = np.arange(0,len(t)/4,0.5)
pl.subplot(1,2,2); pl.plot(w,abs(Z[:int(len(z)/2)])); pl.xlabel('freq. (Hz)'); pl.title('Espectro de Freq.s');
t.shape
```
## Processamento de Sinais - Subpacote _signal_
A caixa de ferramentas de processamento de sinais contém algumas funções de filtragem, um conjunto limitado de ferramentas de projeto de filtros e alguns algoritmos de interpolação B-spline para dados unidimensionais e bidimensionais. Embora os algoritmos B-spline possam tecnicamente ser colocados sob a categoria do subpacote `interpolate`, eles são incluídos aqui porque eles só trabalham com dados igualmente espaçados e fazem uso intenso da teoria de filtragem e do formalismo da função de transferência para fornecer uma transformação B-spline rápida. Todo sinal no SciPy é uma matriz de números reais ou complexos.
<img src='img/lena.png' width=400>
### Filtragem
Filtragem é um nome genérico dado à operação realizada por qualquer sistema que modifica um sinal de entrada de alguma forma. No **Scipy** um sinal pode ser visto como um arranjo (_array_) **Numpy**, vetor ou matriz.
Existem diferentes tipos de filtros para diferentes tipos de operações e, em geral, eles estão divididos em duas grandes categorias de operação de filtragem: linear e não linear. **Filtros lineares** podem sempre ser reduzidos a uma multiplicação de matrizes Numpy achatadas (todos os elementos da matriz são colocados numa única linha) resultando em outra matriz Numpy achatada.
```
# Retorna uma cópia do array colapsado a (reduzido a) uma dimensão --> numpy.ndarray.flatten(order='C')
a = np.array([[1,2], [3,4], [5,6]])
print(a)
print(a.flatten('C')) # construído por linha (default), forma usual na Ling. C
print(a.flatten('F')) # construído por coluna, forma usual na Ling. Fortran
```
É claro que esta não é a melhor forma para se calcular um filtro, pois as matrizes e vetores envolvidos podem ser enormes.
Por exemplo: a filtragem de uma imagem 512x512 pixels usando o método descrito, precisaria da multiplicação de uma matriz $512^2x512^2$ por um vetor de $512^2$ elementos. Apenas o armazenamento dessa matriz **Numpy** padrão, necessitaria de 68.719.476.736 elementos (68 bilhões). Usando armazenamento de 4 bytes por elemento (valor numérico real de precisão simples) seriam necessários mais de 256 GB de memória. Na maioria das aplicações os elementos dessa matriz são nulos e um método diferente para cálculo da saída deve ser empregado.
### Convolução/Correlação
Muitos filtros lineares também possuem a propriedade de invariância ao deslocamento (da variável independente: tempo, espaço etc.). Isto significa que a operação de filtragem é a mesma em locais diferentes do sinal e implica que a matriz de filtragem pode ser construída a partir do conhecimento de uma linha (ou coluna) da matriz. Neste caso, a multiplicação da matriz pode ser realizada usando as transformadas de Fourier.
Seja $x[n]$ um sinal unidimensional indexado pelo inteiro $n$. A **Convolução** de dois sinais unidimensionais pode ser expressa por:
$$y[n]=\sum_{k=-\infty}^\infty x[k].h[n-k]$$
Esta equação só pode ser implementada diretamente se limitarmos as sequências envolvidas, $x[n]$ e $h[n]$, a sequências de durações finitas, para que possam ser armazenadas na memória de um computador digital. Vamos escolher $n=0$ para ser o ponto inicial de ambas as sequências, e seja $K+1$ o valor para o qual $x[n]=0$ para todo $n\ge K+1$, e $M+1$ seja o valor para o qual $h[n]=0$ para todo $n \ge M+1$, então a expressão de convolução discreta será:
$$y[n]=\sum_{k=max(n-M,0)}^{min(n,K)} x[k].h[n-k]$$
A convolução discreta de duas sequências finitas de comprimentos $K+1$ e $M+1$, respectivamente, resulta numa sequência finita de comprimento $K+M+1 = (K+1)+(M+1)-1$.
A convolução unidimensional é implementada no **SciPy** com a função `convolve()`. Esta função tem como entradas os sinais `x[n]`, `h[n]`, e dois _flags_ opcionais `mode` e `method`, e retorna o sinal `y[n]`.
O _flag_ `mode` (opcional) permite especificar qual parte do sinal de saída será retornada. O valor padrão `full` retorna o resultado cheio, ou seja, todos os valores calculados na convolução (inclusive os nulos). Se o _flag_ for `same`, somente os $K$ valores centrais serão retornados, começando em $y[⌊(M-1)/2⌋]$, de modo que a saída tenha o mesmo comprimento do primeiro sinal das entradas. Se o _flag_ for `valid` então somente os `K-M+1` valores de saída são retornados.
O segundo _flag_ `method`, opcional, determina como a convolução é calculada, seja através da abordagem da transformada de Fourier com `fftconvolve()` ou através do método direto (somatório da definição). Normalmente é selecionado o método esperado mais rápido. O método da transformada de Fourier tem ordem de grandeza $O(N.logN)$, enquanto o método direto tem ordem de grandeza $O(N^2)$. Dependendo da constante $O$ e do valor de $N$, um desses métodos pode ser mais rápido que o outro. O valor padrão `auto` executa um cálculo aproximado e escolhe o método esperado mais rápido, enquanto os valores `direct` e `fft` forçam a computação com os outros dois métodos. O código abaixo mostra um exemplo simples de convolução de duas sequências:
```
from scipy.signal import convolve
x = np.array([1.0, 2.0, 3.0])
h = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
print(convolve(x, h))
print(convolve(x, h, 'same'))
```
Multiplicação polinomial:
$$(x^2-4x+6).(x-3) = x^3-4x^2+6x-3x^2+12x-18 = x^3-7x^2+18x-18$$
```
p1 = np.array([1,-4,6])
p2 = np.array([1,-3])
res = convolve(p1,p2)
print("Resultado da multiplicação polinomial: ", res)
```
#### Cálculo da Convolução pelo Método da Tabela (manual)
<img src='img/metodo_tabela.png' width=600>
Essa mesma função `convolve()` pode usar matrizes N-dimensionais como entradas e retornar a convolução N-dimensional entre as duas matrizes, conforme mostrado no exemplo seguinte. Os mesmos _flags_ de entrada também estão disponíveis para esse caso.
```
from scipy import signal
x = np.array([[1.,1.,0.,0.],[1.,1.,0.,0.],[0.,0.,0.,0.],[0.,0.,0.,0.]])
h = np.array([[1.,0.,0.,0.],[0.,0.,0.,0.],[0.,0.,1.,0.],[0.,0.,0.,0.]])
signal.convolve(x, h)
```
A **correlação** é uma operação muito semelhante à convolução, em termos de equação, troca-se apenas o sinal negativo por um positivo. Portanto, a correlação cruzada dos sinais `x[n]` e `y[n]`, é dada por:
$$w[n]=\sum_{k=-\infty}^\infty y[k].x[n+k]$$
Para sinais de comprimento finito com $y[n]=0$ fora do intervalo $[0,K]$, e $x[n]=0$ fora do intervalo $[0,M]$, o somatório seria:
$$w[n]= \sum_{k=máx(n-M,0)}^{mín(n,K)} y[k].x[n+k]$$
A função `scipy.correlate()` implementa a correlação. _Flags_ equivalentes aos da função `convolve()` estão disponíveis para esta operação, retornando uma sequência completa de comprimento $K+M+1$ (`full`), ou uma sequência com o mesmo tamanho da maior sequência iniciando em $w[-K+⌊M-1⌋⁄2]$ (`same`) ou uma sequência onde os valores dependem de todos os valores da menor sequência (`valid`). Esta opção final retorna os $K − M + 1$ valores da sequência $w[M-K]$ até $w[0]$, inclusive.
Quando $N=2$, correlação ou convolução, pode ser usado para construir filtros de imagem arbitrários para executar ações como desfoque, melhoramento e detecção de borda para uma imagem.
```
import numpy as np
from scipy import signal, misc
import matplotlib.pyplot as plt

# Grayscale raccoon test image.
# NOTE(review): scipy.misc.face was removed in recent SciPy; modern code uses
# scipy.datasets.face — confirm the SciPy version before running.
guaxinim = misc.face(gray=True)
# 50x50 identity kernel: a diagonal of ones smears the image along the
# diagonal (a motion-blur-like effect).
# Other kernels to try: a single impulse filtro[0][0] = 1.0 or filtro[49][25] = 1.0.
filtro = np.eye(50)
guaxfilt = signal.fftconvolve(guaxinim, filtro)
print(filtro)

plt.figure(figsize=(10, 8))
plt.subplot(121); plt.imshow(guaxinim); plt.gray(); plt.title('Imagem Original')
plt.subplot(122); plt.imshow(guaxfilt); plt.gray(); plt.title('Imagem Filtrada')
plt.show()
```
O cálculo da convolução no domínio do tempo, como acima, é usado principalmente para filtrar sinais quando um deles é muito menor do que o outro ($K≫M$), caso contrário, a filtragem linear é calculada com mais eficiência no domínio de frequência, usando a função `fftconvolve()`. Por padrão, `convolve()` estima o método mais rápido usando `choose_conv_method()`.
Se a matriz `filtro[n,m]` puder ser fatorada de acordo com: $h[n,m]=h_1[n].h_2[m]$, a convolução então pode ser calculada por meio da função `sepfir2d()`.
**Exemplo**:
Seja um filtro gaussiano $h[n,m]\propto e^{-n^2-m^2}=e^{-n^2}\cdot e^{-m^2}$, o qual é frequentemente usado para borrar imagens.
```
# Gaussian blur via a separable 2-D FIR filter: since h[n, m] = h1[n] * h2[m],
# sepfir2d applies the same 1-D Gaussian window along rows and columns.
imagem = misc.ascent()
filtro = signal.gaussian(25, 20.0)  # NOTE(review): moved to scipy.signal.windows.gaussian in modern SciPy
imgnova = signal.sepfir2d(imagem, filtro, filtro)
plt.figure(figsize=(10,8))
plt.subplot(121); plt.imshow(imagem); plt.gray(); plt.title('Imagem Original')
plt.subplot(122); plt.imshow(imgnova); plt.gray(); plt.title('Imagem Filtrada')
plt.show()
help(signal.gaussian)
```
**help(gaussian)**
<pre>
scipy.signal.gaussian(M, std, sym=True)
Return a Gaussian window.
Parameters:
M : int - Number of points in the output window. If zero or less, an empty array is returned.
std : float - The standard deviation, sigma.
sym : bool, optional - When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis.
Returns:
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1 does not appear if M is even and sym is True).</pre>
**Exercício**:
Use a imagem da Lena para fazer um embaçamento na área do rosto, de modo que não se identifique a pessoa.
```
# possível solução
from pylab import imread, imshow, figure, subplot
from scipy.signal import sepfir2d, gaussian
from numpy import uint8, float32, array
def norm8(img):
    """Linearly rescale an array to the full uint8 range [0, 255].

    Fix: a constant image (max == min) previously divided by zero; it now
    maps every pixel to 0 instead.
    """
    mn = img.min()
    mx = img.max()
    if mx == mn:
        # constant image: nothing to stretch, return all zeros
        return (img - mn).astype(uint8)
    I = (img - mn)/(mx - mn)*255
    return I.astype(uint8)
filtro = gaussian(30, 20.0).astype(float32)
# possível solução
lena = array(imread('dat/lena.dat'))
imshow(lena)
face = lena[200:390,250:350] # rosto
figure(figsize=(10,8), dpi=80)
subplot(2,2,1); imshow(face)
emba = sepfir2d(lena[200:390,250:350], filtro, filtro) # embaçamento
embb = norm8(emba)
subplot(2,2,2); imshow(embb)
lena[200:390,250:350] = embb # rosto embaçado recolocado na imagem
subplot(2,1,2); imshow(lena);
```
<p style="text-align:center;">>>>>> <a href="/notebooks/index.ipynb">Volta ao Índice... </a><<<<<
Fontes:
1. https://wellsr.com/python/solving-a-system-of-linear-equations-with-numpy/
| github_jupyter |
# En este ejercicio vamos a optimizar parámetros #
(Credits to https://github.com/codiply/blog-ipython-notebooks/blob/master/scikit-learn-estimator-selection-helper.ipynb )
Para optimizar los parámetros usaremos un GridSearch.
Y comparar clasificadores.
<div class="alert alert-danger" role="alert">
Este ejemplo es para python v2.x, no funcionará en un virtualenv 3.x
</div>
```
# Report interpreter and library versions (this notebook targets Python 2.x).
import sys
import IPython
import numpy as np
import pandas as pd
import sklearn as sk
print('Python version: %s.%s.%s' % sys.version_info[:3])
print( 'IPython version:', IPython.__version__)
print( 'numpy version:', np.__version__)
print( 'pandas version:', pd.__version__)
print( 'scikit-learn version:', sk.__version__)
```
This is a helper class for running paramater grid search across different classification or regression models. The helper takes two dictionaries as its constructor parameters. The first dictionary contains the models to be scored, while the second contains the parameters for each model (see examples below or the [GridSearchCV documentation](http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.GridSearchCV.html) for the expected format). The `fit(X, y)` method runs a parameter grid search with cross validation for each model and for the given training data. After calling `fit(X, y)`, the `score_summary()` method returns a data frame with a summary of the scores.
```
from sklearn.grid_search import GridSearchCV
class EstimatorSelectionHelper:
    """Run GridSearchCV over several models and summarize the scores.

    `models` maps a name to an (unfitted) estimator; `params` maps the same
    name to that estimator's grid-search parameter grid.
    """

    def __init__(self, models, params):
        if not set(models.keys()).issubset(set(params.keys())):
            missing_params = list(set(models.keys()) - set(params.keys()))
            raise ValueError("Some estimators are missing parameters: %s" % missing_params)
        self.models = models
        self.params = params
        self.keys = models.keys()
        self.grid_searches = {}

    def fit(self, X, y, cv=3, n_jobs=1, verbose=1, scoring=None, refit=False):
        """Grid-search every registered model on (X, y) and keep the fitted searches."""
        for key in self.keys:
            print("Running GridSearchCV for %s." % key)
            model = self.models[key]
            params = self.params[key]
            gs = GridSearchCV(model, params, cv=cv, n_jobs=n_jobs,
                              verbose=verbose, scoring=scoring, refit=refit)
            gs.fit(X, y)
            self.grid_searches[key] = gs

    def score_summary(self, sort_by='mean_score'):
        """Return one DataFrame row per parameter combination, sorted by
        `sort_by` (descending).

        Fixes vs. the original: `mean`/`std` were undefined names (they only
        existed under a pylab-style star import) and are now np.mean/np.std;
        the Python-2-only `params.items() + d.items()` is replaced by a
        portable dict merge; the long-removed `DataFrame.sort` is replaced
        by `sort_values`.
        """
        def row(key, scores, params):
            d = {
                'estimator': key,
                'min_score': min(scores),
                'max_score': max(scores),
                'mean_score': np.mean(scores),
                'std_score': np.std(scores),
            }
            merged = dict(params)
            merged.update(d)
            return pd.Series(merged)

        rows = [row(k, gsc.cv_validation_scores, gsc.parameters)
                for k in self.keys
                for gsc in self.grid_searches[k].grid_scores_]
        df = pd.concat(rows, axis=1).T.sort_values([sort_by], ascending=False)
        # fixed summary columns first, then whatever parameters the grids used
        columns = ['estimator', 'min_score', 'mean_score', 'max_score', 'std_score']
        columns = columns + [c for c in df.columns if c not in columns]
        return df[columns]
```
Classification example
----
I load the data.
```
from sklearn import datasets
# Load the iris dataset (150 samples, 4 features, 3 classes).
iris = datasets.load_iris()
X_iris = iris.data
y_iris = iris.target
print ("Los datos son : " , iris.data[0:5])
```
Definimos dos diccionarios.
- Diccionario de modelos.
- Diccionario de juegos de parámetros (GridSearch) a probar con cada modelo.
```
from sklearn.ensemble import (ExtraTreesClassifier, RandomForestClassifier,
                              AdaBoostClassifier, GradientBoostingClassifier)
from sklearn.svm import SVC
# Candidate classifiers, keyed by name ...
models1 = {
    'ExtraTreesClassifier': ExtraTreesClassifier(),
    'RandomForestClassifier': RandomForestClassifier(),
    'AdaBoostClassifier': AdaBoostClassifier(),
    'GradientBoostingClassifier': GradientBoostingClassifier(),
    'SVC': SVC()
}
# ... and the parameter grid to try for each one (same keys as models1).
params1 = {
    'ExtraTreesClassifier': { 'n_estimators': [16, 32] },
    'RandomForestClassifier': { 'n_estimators': [16, 32] },
    'AdaBoostClassifier': { 'n_estimators': [16, 32] },
    'GradientBoostingClassifier': { 'n_estimators': [16, 32], 'learning_rate': [0.8, 1.0] },
    'SVC': [
        {'kernel': ['linear'], 'C': [1, 10]},
        {'kernel': ['rbf'], 'C': [1, 10], 'gamma': [0.001, 0.0001]},
    ]
}
```
I create the helper and fit the data.
```
# Run the grid search for every classifier.
# NOTE(review): plain 'f1' scoring fails on the 3-class iris target in modern
# scikit-learn (an average mode such as 'f1_macro' is required); this notebook
# targets the old Python 2 API — confirm before porting.
helper1 = EstimatorSelectionHelper(models1, params1)
helper1.fit(X_iris, y_iris, scoring='f1', n_jobs=2)
```
Finally, I print the summary.
```
helper1.score_summary(sort_by='min_score')
```
Regression example
----
I load the data.
```
# Load the diabetes regression dataset (442 samples, 10 features).
diabetes = datasets.load_diabetes()
X_diabetes = diabetes.data
y_diabetes = diabetes.target
```
I define the models and the grid search parameters.
```
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso
# Candidate regressors and their parameter grids.
# NOTE(review): GradientBoostingRegressor is imported but never used below.
models2 = {
    'LinearRegression': LinearRegression(),
    'Ridge': Ridge(),
    'Lasso': Lasso()
}
params2 = {
    'LinearRegression': { },
    'Ridge': { 'alpha': [0.1, 1.0] },
    'Lasso': { 'alpha': [0.1, 1.0] }
}
```
I create the helper and fit the data.
```
# Grid-search the regressors with the default scoring, using all CPU cores.
helper2 = EstimatorSelectionHelper(models2, params2)
helper2.fit(X_diabetes, y_diabetes, n_jobs=-1)
```
Finally, I print the summary.
```
helper2.score_summary()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ariG23498/G-SimCLR/blob/master/Imagenet_Subset/Vanilla_SimCLR/Linear_Evaluation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Imports and setup
```
import tensorflow as tf
print(tf.__version__)

# GPU check and experiment-tracking dependency (Jupyter shell escapes).
!nvidia-smi
!pip install -q wandb

# Other imports
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
import matplotlib.pyplot as plt
from imutils import paths
from tqdm import tqdm
import tensorflow as tf
import seaborn as sns
import numpy as np
import cv2

# Random seed fixation (reproducible shuffles and weight inits)
tf.random.set_seed(666)
np.random.seed(666)

# Authorize wandb (prompts for an API key on first run)
import wandb
wandb.login()
from wandb.keras import WandbCallback
```
## Dataset gathering and preparation
```
# Gather dataset: 5-class ImageNet subset with a fixed train/test split.
!git clone https://github.com/thunderInfy/imagenet-5-categories

# Train and test image paths
train_images = list(paths.list_images("imagenet-5-categories/train"))
test_images = list(paths.list_images("imagenet-5-categories/test"))
print(len(train_images), len(test_images))
def prepare_images(image_paths):
    """Read, resize to 224x224 and scale images to [0, 1]; derive labels
    from the file paths.

    NOTE(review): the label is `path.split("/")[2]` up to the first
    underscore, which assumes paths of the form
    `imagenet-5-categories/<split>/<label>_<id>.jpg` — confirm for other
    directory layouts (and non-POSIX separators).
    """
    images, labels = [], []
    for path in tqdm(image_paths):
        pixels = cv2.resize(plt.imread(path), (224, 224)) / 255.
        images.append(pixels)
        labels.append(path.split("/")[2].split("_")[0])
    images = np.array(images)
    labels = np.array(labels)
    print(images.shape, labels.shape)
    return images, labels
# Build the arrays and integer-encode the string labels (fit on train only).
X_train, y_train = prepare_images(train_images)
X_test, y_test = prepare_images(test_images)

le = LabelEncoder()
y_train_enc = le.fit_transform(y_train)
y_test_enc = le.transform(y_test)
```
## Utilities
```
# Architecture utils
def get_resnet_simclr(hidden_1, hidden_2, hidden_3):
    """Build a trainable from-scratch ResNet50 encoder topped by a 3-layer
    SimCLR projection head: Dense-ReLU -> Dense-ReLU -> Dense."""
    base_model = tf.keras.applications.ResNet50(include_top=False, weights=None, input_shape=(224, 224, 3))
    base_model.trainable = True

    inputs = Input((224, 224, 3))
    head = GlobalAveragePooling2D()(base_model(inputs, training=False))
    for units in (hidden_1, hidden_2):
        head = Dense(units)(head)
        head = Activation("relu")(head)
    head = Dense(hidden_3)(head)
    return Model(inputs, head)
# Download and load the pretrained vanilla-SimCLR encoder checkpoint.
!wget https://github.com/ariG23498/G-SimCLR/releases/download/v3.0/ImageNet_Subset_Deep_Autoencoder.zip
!unzip -qq ImageNet_Subset_Deep_Autoencoder.zip
resnet_simclr = tf.keras.models.load_model('ImageNet_Subset_Deep_Autoencoder/vanilla_simclr_imagenet_subset.h5')
resnet_simclr.summary()
def plot_training(H):
    """Plot training/validation loss and accuracy curves from a Keras
    History object, in xkcd sketch style."""
    curves = (("loss", "train_loss"), ("val_loss", "val_loss"),
              ("accuracy", "train_acc"), ("val_accuracy", "val_acc"))
    with plt.xkcd():
        for key, label in curves:
            plt.plot(H.history[key], label=label)
        plt.title("Training Loss and Accuracy")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend(loc="lower left")
        plt.show()
def get_linear_model(features):
    """A single softmax Dense(5) classifier over `features`-dimensional
    inputs — the linear probe used for evaluation."""
    return Sequential([Dense(5, input_shape=(features, ), activation="softmax")])
```
## Evaluation
```
# Freeze the ResNet50 backbone (layer index 1) for linear evaluation.
resnet_simclr.layers[1].trainable = False
resnet_simclr.summary()

# Early Stopping to prevent overfitting
es = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=2, verbose=2, restore_best_weights=True)

# These layers will not be trained during linear evaluation
resnet_simclr.layers[3].trainable = False
resnet_simclr.layers[5].trainable = False

# Probe 1: features taken after the full non-linear projection head
# (second-to-last layer output, 128-dim).
projection = Model(resnet_simclr.input, resnet_simclr.layers[-2].output)
print(projection.summary())

# Extract train and test features
train_features = projection.predict(X_train)
test_features = projection.predict(X_test)
print(train_features.shape, test_features.shape)

# Initialize wandb
wandb.init(entity="g-simclr", project="g-simclr", id="imagenet-s-simclr-le-1")

# Linear model
linear_model = get_linear_model(128)
linear_model.compile(loss="sparse_categorical_crossentropy", metrics=["accuracy"],
                     optimizer="adam")
history = linear_model.fit(train_features, y_train_enc,
                           validation_data=(test_features, y_test_enc),
                           batch_size=64,
                           epochs=100,
                           callbacks=[es, WandbCallback()])
plot_training(history)

# Probe 2: features with one fewer projection layer (256-dim).
projection = Model(resnet_simclr.input, resnet_simclr.layers[-4].output)
print(projection.summary())

# Extract train and test features
train_features = projection.predict(X_train)
test_features = projection.predict(X_test)
print(train_features.shape, test_features.shape)

# Initialize wandb
wandb.init(entity="g-simclr", project="g-simclr", id="imagenet-s-simclr-le-2")

linear_model = get_linear_model(256)
linear_model.compile(loss="sparse_categorical_crossentropy", metrics=["accuracy"],
                     optimizer="adam")
history = linear_model.fit(train_features, y_train_enc,
                           validation_data=(test_features, y_test_enc),
                           batch_size=64,
                           epochs=35,
                           callbacks=[es, WandbCallback()])
plot_training(history)

# Probe 3: raw encoder features with no projection head (2048-dim GAP output).
projection = Model(resnet_simclr.input, resnet_simclr.layers[-6].output)
print(projection.summary())

# Extract train and test features
train_features = projection.predict(X_train)
test_features = projection.predict(X_test)
print(train_features.shape, test_features.shape)

# Initialize wandb
wandb.init(entity="g-simclr", project="g-simclr", id="imagenet-s-simclr-le-3")

linear_model = get_linear_model(2048)
linear_model.compile(loss="sparse_categorical_crossentropy", metrics=["accuracy"],
                     optimizer="adam")
history = linear_model.fit(train_features, y_train_enc,
                           validation_data=(test_features, y_test_enc),
                           batch_size=64,
                           epochs=35,
                           callbacks=[es, WandbCallback()])
plot_training(history)
```
| github_jupyter |
# Evolutionnary Hierarchical Dirichlet Processes for Multiple Correlated Time Varying Corpora
## Introduction
-----------------
Le notebook suivant est l'implémentation du code de l'article EvoHDP, réalisé par J.Zhang,Y.Song & al et est testé : <br\>
- sur les données synthétiques indiqués par l'article
- sur des courts documents ayant un thème particulier
- sur des résumés de la séries Game Of Thrones <br\>
L'article est accessible grâce au lien suivant :
<br\>
http://www.shixialiu.com/publications/evohdp/paper.pdf
<br\> <br\>
Les détails et rappels mathématiques sont donnés au fur et à mesure de la rédaction du code
Les indications (telles que "voir Table x" ou "voir (xx)") font référence à l'article
La plupart des cellules terminent par une ligne de code test de la fonction implémentée juste au dessus et peuvent être dé-commentées pour comprendre le modèle "step by step".
-----------------
```
import numpy as np
from scipy.stats import multinomial
from scipy.special import gammaln
import copy
import math
import mpmath
import os
from sklearn.feature_extraction.text import CountVectorizer
import time
from tqdm import tqdm
import random
import pandas as pd
# les données sont organisées sous cette forme : data=[T][J][[doc_t_j_1],[doc_t_j_2],...]
```
# Experiments on small documents
```
# French stop-word list (accented and unaccented variants, plus single
# letters) passed to CountVectorizer below; assembled from public stop-word
# lists and containing duplicates, which CountVectorizer tolerates.
stop_words=['l','d','s','de','un', 'une','alors','au','aucuns','aussi','autre','avant','avec','avoir','bon','car','ce','cela','ces','ceux','chaque','ci','comme','comment','dans','des','du','dedans','dehors'
,'depuis','devrait','doit','donc','dos','début','elle','elles','en','encore','essai','est','et','eu','fait','faites','fois','font','hors','ici','il','ils','je','juste'
,'la','le','les','leur','là','ma','maintenant','mais','mes','mine','moins','mon','mot','même','ni','nommés','notre','nous','ou','où','par','parce','pas','peut','peu','plupart','pour','pourquoi','quand','que','quel','quelle','quelles','quels','qui','sa','sans','ses','seulement'
,'si','sien','son','sont','sous','soyez','sujet','sur','ta','tandis','tellement','tels','tes','ton','tous','tout','trop','très','tu','voient','vont','votre','vous','vu','ça','étaient','état','étions','été','être',"a","abord","absolument","afin","ah","ai","aie","aient","aies"
,"ailleurs","ainsi","ait","allaient","allo","allons","allô","alors","anterieur","anterieure","anterieures","apres","après","as","assez","attendu","au","aucun","aucune","aucuns","aujourd","aujourd'hui","aupres","auquel","aura","aurai","auraient","aurais","aurait","auras"
,"aurez","auriez","aurions","aurons","auront","aussi","autre","autrefois","autrement","autres","autrui","aux","auxquelles","auxquels","avaient","avais","avait","avant","avec","avez","aviez","avions","avoir","avons","ayant","ayez","ayons","b","bah","bas","basee","bat"
,"beau","beaucoup","bien","bigre","bon","boum","bravo","brrr","c","car","ce","ceci","cela","celle","celle-ci","celle-là","celles","celles-ci","celles-là","celui","celui-ci","celui-là","celà","cent","cependant","certain","certaine","certaines","certains","certes","ces","cet"
,"cette","ceux","ceux-ci","ceux-là","chacun","chacune","chaque","cher","chers","chez","chiche","chut","chère","chères","ci","cinq","cinquantaine","cinquante","cinquantième","cinquième","clac","clic","combien","comme","comment","comparable","comparables","compris","concernant"
,"contre","couic","crac","d","da","dans","de","debout","dedans","dehors","deja","delà","depuis","dernier","derniere","derriere","derrière","des","desormais","desquelles","desquels","dessous","dessus","deux","deuxième","deuxièmement","devant","devers","devra","devrait","different"
,"differentes","differents","différent","différente","différentes","différents","dire","directe","directement","dit","dite","dits","divers","diverse","diverses","dix","dix-huit","dix-neuf","dix-sept","dixième","doit","doivent","donc","dont","dos","douze","douzième","dring","droite"
,"du","duquel","durant","dès","début","désormais","e","effet","egale","egalement","egales","eh","elle","elle-même","elles","elles-mêmes","en","encore","enfin","entre","envers","environ","es","essai","est","et","etant","etc","etre","eu","eue","eues","euh","eurent","eus","eusse"
,"eussent","eusses","eussiez","eussions","eut","eux","eux-mêmes","exactement","excepté","extenso","exterieur","eûmes","eût","eûtes","f","fais","faisaient","faisant","fait","faites","façon","feront","fi","flac","floc","fois","font","force","furent","fus","fusse","fussent","fusses","fussiez"
,"fussions","fut","fûmes","fût","fûtes","g","gens","h","ha","haut","hein","hem","hep","hi","ho","holà","hop","hormis","hors","hou","houp","hue","hui","huit","huitième","hum","hurrah","hé","hélas"
,"i","ici","il","ils","importe","j","je","jusqu","jusque","juste","k","l","la","laisser","laquelle","las","le","lequel","les","lesquelles","lesquels","leur","leurs","longtemps","lors","lorsque","lui","lui-meme","lui-même","là","lès","m","ma","maint","maintenant","mais","malgre","malgré","maximale","me","meme","memes","merci","mes","mien","mienne","miennes","miens","mille","mince","mine","minimale","moi","moi-meme","moi-même","moindres","moins","mon","mot","moyennant","multiple","multiples","même","mêmes","n","na","naturel","naturelle","naturelles","ne","neanmoins","necessaire","necessairement","neuf","neuvième","ni","nombreuses","nombreux","nommés","nos","notamment","notre","nous","nous-mêmes","nouveau","nouveaux","nul","néanmoins","nôtre","nôtres","o","oh","ohé","ollé","olé","on","ont","onze","onzième","ore","ou","ouf","ouias","oust","ouste","outre","ouvert","ouverte","ouverts","o|","où","p","paf","pan","par","parce","parfois","parle","parlent","parler","parmi","parole","parseme","partant","particulier","particulière","particulièrement","pas","passé","pendant","pense","permet","personne","personnes","peu","peut","peuvent","peux","pff","pfft","pfut","pif","pire","pièce","plein","plouf","plupart","plus","plusieurs","plutôt","possessif","possessifs","possible","possibles","pouah","pour","pourquoi","pourrais","pourrait","pouvait","prealable","precisement","premier","première","premièrement","pres","probable","probante","procedant","proche","près","psitt","pu","puis","puisque","pur","pure","q","qu","quand","quant","quant-à-soi","quanta","quarante","quatorze","quatre","quatre-vingt","quatrième","quatrièmement","que","quel","quelconque","quelle","quelles","quelqu'un","quelque","quelques","quels","qui","quiconque","quinze","quoi","quoique","r","rare","rarement","rares","relative","relativement","remarquable","rend","rendre","restant","reste","restent","restrictif","retour","revoici","revoilà","rien","s","sa","sacrebleu","sait","sans","sapristi","sauf","se","sein","seize","selon","semblable","semblaient","semble","semblent","sent","sept","septième","sera","serai","seraient","serais","serait","seras","serez","seriez","serions","serons","seront","ses","seul","seule","seulement","si","sien","sienne","siennes","siens","sinon","six","sixième","soi","soi-même","soient","sois","soit","soixante","sommes","son","sont","sous","souvent","soyez","soyons","specifique","specifiques","speculatif","stop","strictement","subtiles","suffisant","suffisante","suffit","suis","suit","suivant","suivante","suivantes","suivants","suivre","sujet","superpose","sur","surtout","t","ta","tac","tandis","tant","tardive","te","tel","telle","tellement","telles","tels","tenant","tend","tenir","tente","tes","tic","tien","tienne","tiennes","tiens","toc","toi","toi-même","ton","touchant"
,"toujours","tous","tout","toute","toutefois","toutes","treize","trente","tres","trois","troisième","troisièmement","trop","très","tsoin","tsouin","tu","té","u","un","une","unes","uniformement","unique","uniques","uns","v","va","vais","valeur","vas","vers","via","vif","vifs","vingt","vivat","vive","vives","vlan","voici","voie","voient","voilà","vont","vos","votre","vous","vous-mêmes","vu","vé","vôtre","vôtres","w","x","y","z","zut","à","â","ça","ès","étaient","étais","était","étant","état","étiez","étions","été","étée","étées","étés","êtes","être","ô"]
def generate_easy_data(stop_words):
    """Load the small music/sport corpus as data[t][j] = list of term-count docs.

    Returns (data, T, J, W, name_word): T=3 epochs of J=2 corpora with two
    documents each, W the vocabulary size and name_word the vocabulary.

    Fix: the file handle is now closed (the original leaked it via
    open(...).readlines()).
    """
    path_doc = 'Test_musique_sport/ex_simple'
    doc_file = 'Test_musique_sport/name_basket'
    with open(doc_file, 'r') as f:
        filename = f.readlines()
    filename = [os.path.join(path_doc, filename[i].replace('\n', '')) for i in range(len(filename))]
    vectorizer = CountVectorizer(input='filename', max_df=0.9, stop_words=stop_words)
    tf = vectorizer.fit_transform(filename).todense()  # term frequencies per document
    name_word = vectorizer.get_feature_names()  # NOTE(review): get_feature_names_out() in scikit-learn >= 1.2
    # hand-picked document pairs per (epoch, corpus)
    d11 = [tf[4, :].tolist()[0], tf[5, :].tolist()[0]]
    d12 = [tf[6, :].tolist()[0], tf[7, :].tolist()[0]]
    d21 = [tf[10, :].tolist()[0], tf[11, :].tolist()[0]]
    d22 = [tf[8, :].tolist()[0], tf[9, :].tolist()[0]]
    d31 = [tf[0, :].tolist()[0], tf[1, :].tolist()[0]]
    d32 = [tf[2, :].tolist()[0], tf[3, :].tolist()[0]]
    data = [[d11, d12], [d21, d22], [d31, d32]]
    T = len(data)
    J = len(data[0])
    W = len(data[0][0][0])
    return (data, T, J, W, name_word)
#data,T,J,W,name_word=generate_easy_data(stop_words)
```
# Experiments on GOT documents
```
def generate_GOT_data(stop_words):
    """Load the Game-of-Thrones corpus: 7 epochs (seasons), one corpus each.

    Epochs 0-5 hold 10 documents (episodes), epoch 6 holds 7. Returns
    (data, name_doc, T, J, W, name_word).

    Fix: the file handle is now closed (the original leaked it via
    open(...).readlines()).
    """
    path_doc = 'Test_GOT/txt_GOT'
    doc_file = 'Test_GOT/name_GOT_txt'
    with open(doc_file, 'r') as f:
        filename = f.readlines()
    filename = [os.path.join(path_doc, filename[i].replace('\n', '')) for i in range(len(filename))]
    vectorizer = CountVectorizer(input='filename', max_df=0.9, stop_words=stop_words)
    tf = vectorizer.fit_transform(filename).todense()  # term frequencies per document
    name_word = vectorizer.get_feature_names()  # NOTE(review): get_feature_names_out() in scikit-learn >= 1.2
    data = []
    name_doc = []
    # six full seasons of 10 episodes each
    for i in range(6):
        docs_tempsi = []
        name_tempsi = []
        for j in range(10):
            docs_tempsi.append(tf[i*10+j, :].tolist()[0])
            name_tempsi.append(filename[i*10 + j])
        data.append([docs_tempsi])
        name_doc.append(name_tempsi)
    # final season with only 7 episodes
    docs_tempsi = []
    name_tempsi = []
    for j in range(7):
        docs_tempsi.append(tf[6*10+j, :].tolist()[0])
        name_tempsi.append(filename[6*10 + j])
    data.append([docs_tempsi])
    name_doc.append(name_tempsi)
    T = len(data)
    J = len(data[0])
    W = len(data[0][0][0])
    return (data, name_doc, T, J, W, name_word)
#data,name_doc,T,J,W,name_word=generate_GOT_data(stop_words)
```
# Experiments on synthetic data
Les données synthétiques sont une mixture de multinomiales, de paramètres $\phi_k$ indiqué en Table 1 et repris ci-dessous.
```
# Ground-truth multinomial parameters (Table 1): phi_k = (k/10, 1 - k/10).
true_phi = np.array([
    [0.1, 0.9],
    [0.2, 0.8],
    [0.3, 0.7],
    [0.4, 0.6],
    [0.5, 0.5],
    [0.6, 0.4],
    [0.7, 0.3],
    [0.8, 0.2],
])
T = 4   # number of epochs
J = 3   # corpora per epoch
W = 2   # vocabulary size
K = 40  # truncation level of the stick-breaking representations
# info_data[t][j] will hold [three local topic indices, corpus size];
# corpora_sizes[t][j] is the number of documents of corpus j at epoch t.
corpora_sizes = [[500, 300, 400], [510, 320, 430], [520, 320, 430], [530, 340, 450]]
def local_components_and_corpora_sizes(T, J, corpora_sizes):
    """Build info_data[t][j] = [t+j, t+j+1, t+j+2, corpora_sizes[t][j]].

    The first three entries index the three 'true' local topics active in
    corpus j at epoch t; the last entry is that corpus' size.
    """
    return [[[t + j, t + j + 1, t + j + 2, corpora_sizes[t][j]] for j in range(J)]
            for t in range(T)]
#info_data=local_components_and_corpora_sizes(T,J,corpora_sizes)
#info_data
#data=[T][J][[doc_t_j_1],[doc_t_j_2],...]
def mixture_of_three_multinomial(liste_of_phi_indices, true_phi, corpora_size, z):
    """Draw documents from a 3-component multinomial mixture.

    z[i] documents of 200 words each are drawn from the multinomial with
    parameters true_phi[liste_of_phi_indices[i]]; the three groups are
    returned as one flat list of term-count vectors (lists of ints).

    `corpora_size` is unused and kept only for signature compatibility
    (the per-component counts are fully determined by `z`).
    Fix: a component with z[i] == 0 no longer crashes — the original
    np.concatenate failed on the resulting empty, shape-(0,) operand.
    """
    documents = []
    for idx, count in zip(liste_of_phi_indices, z):
        documents.extend(np.random.multinomial(200, true_phi[idx], size=count).tolist())
    return documents
def generate_data_from_mixture_of_multinomials(T, J, info_data, true_phi):
    """Build data[t][j]: the documents of corpus j at epoch t, sampled from
    a mixture of the three local components listed in info_data[t][j]."""
    data = []
    for t in range(T):
        epoch = []
        for j in range(J):
            size = info_data[t][j][3]
            # split the corpus size uniformly over the three components
            z = np.random.multinomial(size, [1/3, 1/3, 1/3])
            epoch.append(mixture_of_three_multinomial(info_data[t][j], true_phi, size, z))
        data.append(epoch)
    return data
#data=generate_data_from_mixture_of_multinomials(T,J,info_data,true_phi)
#data
```
L'objectif de cette expérimentation est de retrouver les "true_phi" par l'algorithme EvoHDP. <br\>Ces "true_phi" ont été utilisé pour générer nos données.
# Initialize hyper parameters
Pour l'initialisation, le modèle est celui d'un HDP à trois niveaux :
$$ H \sim Dir ( 1/W) $$
$$ G \sim DP(\xi , H) $$
Pour chaque temps :
$$ \forall t \in T $$
$$ G_{0}^t \sim DP(\gamma^t , G) $$
Pour chaque corpus :
$$ \forall j \in J $$
$$ G_{j}^t \sim DP(\alpha_{0}^t , G_{0}^t) $$
On doit simuler pour l'inititalisation des paramètres :
$$ \xi \sim Gamma(10,1) $$
Pour chaque temps :
$$ \forall t \in T $$
$$ \eta^t \sim Gamma(10,1) $$
$$ \alpha_{0}^t \sim Gamma(10,1) $$
# Initialize parameters
$$ \xi \sim Gamma(10,1) $$
```
# xi ~ Gamma(10, 1): concentration of the top-level DP, G ~ DP(xi, H).
a_xi=10
b_xi=1
xi=np.random.gamma(a_xi,b_xi)
```
Pour chaque temps :
$$ \forall t \in T $$
$$ \gamma^t \sim Gamma(10,1) $$
$$ \alpha_{0}^t \sim Gamma(10,1) $$
```
# Per-epoch concentrations, both drawn from Gamma(10, 1):
#   gamma^t  for G_0^t ~ DP(gamma^t, G)
#   alpha_0^t for G_j^t ~ DP(alpha_0^t, G_0^t)
T=4
a_gamma=10
b_gamma=1
a_alpha=10
b_alpha=1
gamma=[np.random.gamma(a_gamma,b_gamma) for i in range(T)]
alpha=[np.random.gamma(a_alpha,b_alpha) for i in range(T)]
```
On crée les time dependencies $v^t=w^t=a$ avec $a \in {0.1,0.3,0.5,0.7,0.9}$ et nous étudierons l'impact de cette variable.
```
# Time-dependency weights v^t (per topic) and w^t; here a = 0.8.
# NOTE(review): T*[K*[0.8]] makes every v[t] the SAME aliased inner list;
# harmless here because v is only read, never mutated in place.
#ici a=0.8
v=T*[K*[0.8]]
w=T*[0.8]
```
# Generate from stick breaking for initilization of measures
$$ H \sim Dir ( 1/W) $$
$$ G \sim DP(\xi , H) $$
$$ G = \sum_{k=1}^{\infty} \nu_k \delta_{\phi_k} $$
où :
$$ \nu \sim GEM(\xi) $$ et: $$ \phi_k \sim H $$
```
def stick_breaking(alpha, k, size_W):
    """Draw k stick-breaking (GEM) weights with concentration `alpha`,
    renormalized to sum to one (finite-k truncation).

    `size_W` is unused; a negative `alpha` returns the original sentinel
    error string instead of raising.
    """
    if alpha < 0:
        return "alpha must be positive"
    sticks = np.random.beta(1, alpha, k)
    leftover = np.append(1, np.cumprod(1 - sticks[:-1]))
    weights = sticks * leftover
    return weights / weights.sum()
#nu=stick_breaking(xi,K,W)
#nu
```
$$ G = \sum_{k=1}^{\infty} \nu_k \delta_{\phi_k} $$
Une fois G simulé, on sait que :
$$ G_{0}^t = Dir( \gamma^{t} , G) $$
D'après l'approche stick breaking :
$$ G_{0}^t = \sum_{k=1}^\infty \beta_{k}^t\delta_{\phi_k} $$
où
$$ \beta^t \sim DP(\gamma^t,\hat{\beta}^t)$$$$\hat{\beta}^t=w^t\beta^{t-1}+(1-w^t)\nu$$$$ \nu\sim GEM(\xi) $$
Pour simuler $ G_{0}^t$, on défini les deux propriétés suivantes : <br/> <br/>
I. D'après la **propriété de normalisation** d'un processus de Dirichlet: <br/><br/>
**Si** $$ (X_1,...,X_d)\sim Dirichlet(\alpha_1,...,\alpha_d) $$
**Alors, pour k $\leq$ d**
$$ \dfrac{(X_1,...,X_k)}{\sum_{i\leq k}X_i} \sim Dirichlet(\alpha_1,...,\alpha_k) $$
<br/>
II. Lien entre **la loi de Dirichlet et la loi Beta**:<br/><br/>
**Si** $$(X_1,...,X_d)\sim Dir(\alpha_1,...,\alpha_d)$$
**Alors** $$\forall i \in [1,d],
X_i \sim Beta(\alpha_i,\alpha-\alpha_i),\alpha=\sum_{j=1}^d\alpha_j$$ <br/><br/>
Ainsi on a : <br/>
$$ \frac{\beta_k^t}{1-\sum_{i<k}\beta_i^t} \sim Beta(\gamma^t\hat{\beta}_k,\gamma^t(1-\sum_{i\leq k}\hat{\beta}_i))$$<br/>
Stick-Breaking donne :
$$ \tilde{\beta_k^t}/\hat{\beta}_1,...,\hat{\beta}_k \sim Beta(\gamma^t\hat{\beta}_k,\gamma^t(1-\sum_{i\leq k}\hat{\beta}_i)) $$
$$ \beta_k^t=\tilde{\beta_k^t}\prod_{i<k}(1-\tilde{\beta_i^t})$$<br\><br\>
```
def dirichlet_generate_random(params_dirich):
    """Dirichlet draw tolerant of zero parameters.

    Components with a zero parameter receive exactly zero mass; the
    remaining components are drawn from the Dirichlet restricted to the
    positive parameters (normalization property of the Dirichlet).
    """
    params = np.asarray(params_dirich)
    support = np.nonzero(params)
    sample = np.zeros(len(params))
    sample[support] = np.random.dirichlet(params[params > 0])
    return sample
def beta_generate_random(params_beta):
    """Beta draw clamped to the open interval (1e-10, 1 - 1e-10).

    Degenerate parameters (<= 0) map straight to the corresponding
    endpoint, so downstream log/ratio computations never see exact 0 or 1.
    """
    params = np.array(params_beta) if isinstance(params_beta, list) else params_beta
    if len(params) != 2:
        print("ERROR, la taille des paramètres pour la simluation d'une beta est supérieur à 2")
    if params[0] <= 0:
        return 1e-10
    if params[1] <= 0:
        return 1 - 1e-10
    draw = np.random.beta(params[0], params[1])
    if draw <= 0:
        return 1e-10
    if draw >= 1:
        return 1 - 1e-10
    return draw
# voir (8)
# see (8)
def initialize_G_0_t(gamma, nu, T, K, w):
    """Initialize beta^t, the weights of G_0^t, by stick-breaking (eq. 8).

    The base measure is nu at t=0 and the evolving mixture
    beta_hat^t = w^t * beta^{t-1} + (1 - w^t) * nu afterwards. Each beta^t
    is renormalized to sum to 1 (finite-K truncation).
    """
    G_0_t = []
    for t in range(T):
        base = np.asarray(nu) if t == 0 else w[t] * np.array(G_0_t[t - 1]) + (1 - w[t]) * nu
        sticks = []
        weights = []
        for k in range(K):
            tilde = beta_generate_random([gamma[t] * base[k],
                                          gamma[t] * (1 - np.sum(base[:k + 1]))])
            sticks.append(tilde)
            # beta_k^t = tilde_k * prod_{i<k} (1 - tilde_i)
            weights.append(tilde * np.product(1 - np.array(sticks[:k])))
        G_0_t.append((np.array(weights) / np.sum(weights)).tolist())
    return G_0_t
#beta=initialize_G_0_t (gamma,nu,T,K,w)
```
Maintenant, on simule $G_j^t$ $$ \forall t \in T, \forall j \in J$$
$$ G_j^t=\sum_{k=1}^{\infty}\pi_{jk}^t\delta_{\phi_k}$$ $$\pi_j^t\sim DP(\alpha_0^t,\hat{\pi}^t_j)$$
$$\hat{\pi}^t_j=v_j^t\pi_j^{t-1}+(1-v_j^t)\beta^t$$
```
# see (9)
def initialize_G_j_t(G_0_t, alpha, J, T, K, v):
    """Initialize pi_j^t, the weights of G_j^t, by stick-breaking (eq. 9).

    The base measure is beta^t = G_0_t[t] at t=0 and the evolving mixture
    pi_hat_j^t = v_j^t * pi_j^{t-1} + (1 - v_j^t) * beta^t afterwards.

    Bug fix: for t > 0 the original mixed in the scalar `G_0_t[t][k]`,
    where `k` was a stale index left over from the previous inner loop,
    instead of the whole vector beta^t required by eq. (9).
    """
    G_j_T = []
    for t in range(T):
        G_j_t = []
        for j in range(J):
            if t == 0:
                base = np.array(G_0_t[t])
            else:
                base = v[t][j] * np.array(G_j_T[t - 1][j]) + (1 - v[t][j]) * np.array(G_0_t[t])
            sticks = []
            weights = []
            for k in range(K):
                tilde = beta_generate_random([alpha[t] * base[k],
                                              alpha[t] * (1 - np.sum(base[:k + 1]))])
                sticks.append(tilde)
                # pi_jk^t = tilde_k * prod_{i<k} (1 - tilde_i)
                weights.append(tilde * np.product(1 - np.array(sticks[:k])))
            G_j_t.append((np.array(weights) / np.sum(weights)).tolist())
        G_j_T.append(G_j_t)
    return G_j_T
#pi=initialize_G_j_t(beta,alpha,J,T,K,v)
```
On peut maintenant obtenir $n_{jk}^t$ qui est le nombre de documents du corpus j au temps t qui ont été assignés au topic k (i.e # $z_{ij}^t$ : $z_{ij}^t=k$) <br/>
La fonction "compute_n_t_j" calcule $n_{jk}^t, \forall k \in K$ et retourne une liste de taille K
```
def compute_n_t_j(K, liste_des_Z_temps_t_corpus_j):
    """Return [n_{jk}^t for k in range(K)]: how many entries of the topic
    assignment list equal each k.

    Uses one Counter pass (O(N + K)) instead of K list.count scans (O(N*K));
    topics outside range(K) are simply ignored, as before.
    """
    from collections import Counter
    counts = Counter(liste_des_Z_temps_t_corpus_j)
    return [counts[k] for k in range(K)]
```
Une fois qu'on a initialisé les $\pi_{jk}^t$
,on initialise randomly les Z
La fonction "compute_Z_j_t" permet de calculer les probas normalisées d'un doc au temps t, pour le corpus j.<br/>
La fonction "compute_proba_z_i_j_t_is_k" étend ce calcul à tous les temps et corpus.<br/>
La fonction "log_proba_mult" n'est pas utilisée mais peut s'avérer utile pour éviter les arrondis.<br/>
<br/> Pour l'initialisation, Z est calculé sans information à posteriori
```
def randomly_assign_Z_initialisation(T, J, K, data):
    """Draw an initial topic in {0, ..., K-1} uniformly at random for every
    document, and return (Z, n) where n[t][j] counts assignments per topic."""
    Z, n = [], []
    for t in range(T):
        Z_t, n_t = [], []
        for j in range(J):
            # one uniform multinomial draw per document; nonzero()[1] recovers
            # the chosen column (topic) index of each one-hot row
            draws = np.random.multinomial(1, [1 / K] * K, len(data[t][j]))
            Z_t_j = list(np.nonzero(draws)[1])
            Z_t.append(Z_t_j)
            n_t.append(compute_n_t_j(K, Z_t_j))
        Z.append(Z_t)
        n.append(n_t)
    return Z, n
#Z,N=randomly_assign_Z_initialisation(T,J,K,data)
```
La fonction "compute_T_jk_t_tplus1_et_T_jk_0_tplus1_multinomiale" calcule : <br\> <br\> $$(T_{jk}^{t \Rightarrow t+1},T_{jk}^{0 \Rightarrow t+1}) \sim Multinomiale (T_{jk}^{t+1},[p,1-p]),(22)$$ <br\> avec $$p=\frac{v_j^{t+1}\pi_{jk}^t}{(1-v_j^{t+1})\beta_k^{t+1} + v_j^{t+1}\pi_{jk}^t} $$
```
def compute_T_jk_t_tplus1_et_T_jk_0_tplus1_multinomiale(T_jk_Tplus1, v_j_Tplus1, pi_jk_t, beta_k_Tplus1):
    """Split the T_jk^{t+1} tables into (T_jk^{t->t+1}, T_jk^{0->t+1}) with a
    multinomial draw (eq. 22); p is the share attributed to pi^t.
    A zero denominator (no mass at all) forces p = 0."""
    denom = (1 - v_j_Tplus1) * beta_k_Tplus1 + v_j_Tplus1 * pi_jk_t
    p = (v_j_Tplus1 * pi_jk_t) / denom if denom != 0 else 0
    T_jk_t_tplus1, T_jk_0_tplus1 = np.random.multinomial(T_jk_Tplus1, [p, 1 - p])
    return T_jk_t_tplus1, T_jk_0_tplus1
def compute_M_jk_t_tplus1_et_M_jk_0_tplus1_multinomiale(M_k_Tplus1, w_Tplus1, beta_k_t, nu_k):
    """Split the M_k^{t+1} metatables into (M_k^{t->t+1}, M_k^{0->t+1}) with a
    multinomial draw (eq. 25); q is the share attributed to beta^t.
    A zero denominator forces q = 0."""
    denom = (1 - w_Tplus1) * nu_k + w_Tplus1 * beta_k_t
    q = (w_Tplus1 * beta_k_t) / denom if denom != 0 else 0
    M_k_t_tplus1, Mk_0_tplus1 = np.random.multinomial(M_k_Tplus1, [q, 1 - q])
    return M_k_t_tplus1, Mk_0_tplus1
```
$$ N_{jk}^t=n_{jk}^t+T_{jk}^{t \Rightarrow t+1}$$
$n_{jk}^t$ est le nombre de documents du corpus j assignés au topic k au temps t <br\>
$T_{jk}^{t \Rightarrow t+1}$ est le nombre de tables qui ont été crées avec les menus du temps t. <br\><br\>
$$ T_{jk}^t/\beta_k^t,\pi_{jk}^{t-1},N_{jk}^t\sim CRP (\alpha_0^t v_j^t\pi_{jk}^{t-1} + \alpha_0^t(1- v_j^t)\beta_k^t,N_{jk}^t)$$
```
# Generate table assignments for `num_customers` customers, according to
# a Chinese Restaurant Process with dispersion parameter `alpha`.
def chinese_restaurant_process(num_customers, alpha):
    """Return the number of tables opened by `num_customers` customers in a
    Chinese Restaurant Process with dispersion `alpha`.

    Customer i opens a new table with probability alpha / (alpha + i);
    non-positive customer counts or alpha <= 0 yield zero tables."""
    if num_customers <= 0 or alpha <= 0:
        return 0
    n_tables = 0
    for i in range(num_customers):
        if np.random.rand() < alpha / (alpha + i):
            n_tables += 1
    return n_tables
# Quick smoke-test draw from the CRP sampler defined above.
T_jk_t = chinese_restaurant_process(num_customers=100, alpha=15)
#num_customers=248
#alpha_test=alpha[0]
#T_jk_t=chinese_restaurant_process(num_customers,alpha_test)
#T_jk_t
```
La fonction suivante retourne une liste (dim K) de listes (dim 3) contenant : $$T_{jk}^{t\Rightarrow t+1},T_{jk}^{0\Rightarrow t+1},T_{jk}^t$$
```
def compute_T_tP1_T_0t_Tjkt(temps0,tempsT,T_jk_ttp1,n_jk_t,v_j_t,pi_jk_T_moins1,beta_k_T,alpha_t):
    """Sample, for one corpus j at one time step, the table counts of every topic k.

    Returns (T_3, Nu) where T_3[k] = [T_jk^{t->t+1}, T_jk^{0->t+1}, T_jk^t]
    and Nu[k] = N_jk^t = n_jk^t + T_jk^{t->t+1} (the CRP customer count).

    temps0 / tempsT flag the first / last time step:
    - temps0 (t == 0): there is no previous pi, so the CRP mass is
      alpha^t * beta_k^t and all tables count as "new" (first entry is 0);
    - tempsT (t == T-1): there is no t+1, so T_jk_ttp1 is ignored
      (the caller passes 0 for it).
    """
    T_3=[]
    Nu=[]
    if(temps0):
        for k in range(len(n_jk_t)):
            Nu_jk_t=n_jk_t[k]+T_jk_ttp1[k]
            Nu.append(Nu_jk_t)
            # base measure is beta^0 alone at the first time step
            param_CRP=(alpha_t*beta_k_T[k])
            T_0_jk=chinese_restaurant_process(Nu_jk_t,param_CRP)
            T_3.append([0,T_0_jk,T_0_jk])
    elif(tempsT):
        for k in range(len(n_jk_t)):
            # last time step: no tables flow back from t+1
            Nu_jk_t=n_jk_t[k]
            Nu.append(Nu_jk_t)
            # mixed base measure: alpha * (v * pi^{t-1} + (1 - v) * beta^t)
            param_CRP=(alpha_t*v_j_t[k]*pi_jk_T_moins1[k])+(alpha_t*(1-v_j_t[k])*beta_k_T[k])
            T_0_jk=chinese_restaurant_process(Nu_jk_t,param_CRP)
            T_jk_t_tplus1,T_jk_0_tplus1=compute_T_jk_t_tplus1_et_T_jk_0_tplus1_multinomiale(T_0_jk,v_j_t[k],pi_jk_T_moins1[k],beta_k_T[k])
            T_3.append([T_jk_t_tplus1,T_jk_0_tplus1,T_0_jk])
    else:
        for k in range(len(n_jk_t)):
            Nu_jk_t=n_jk_t[k]+T_jk_ttp1[k]
            Nu.append(Nu_jk_t)
            # mixed base measure, identical to the tempsT branch
            param_CRP=(alpha_t*v_j_t[k]*pi_jk_T_moins1[k])+(alpha_t*(1-v_j_t[k])*beta_k_T[k])
            T_0_jk=chinese_restaurant_process(Nu_jk_t,param_CRP)
            T_jk_t_tplus1,T_jk_0_tplus1=compute_T_jk_t_tplus1_et_T_jk_0_tplus1_multinomiale(T_0_jk,v_j_t[k],pi_jk_T_moins1[k],beta_k_T[k])
            T_3.append([T_jk_t_tplus1,T_jk_0_tplus1,T_0_jk])
    return(T_3,Nu)
```
Les fonctions suivantes calculent les Métatables :
<br/> <br/> La dimension de Métatable est une liste : T*K*3
La fonction "compute_M_jk_t_tplus1_et_M_jk_0_tplus1_multinomiale" calcule : <br\> <br\> $$(M_{k}^{t \Rightarrow t+1},M_{k}^{0 \Rightarrow t+1}) \sim Multinomiale (M_{k}^{t+1},[q,1-q]),(25)$$ <br\> avec $$q=\frac{w^{t+1}\beta_{k}^t}{(1-w^{t+1})\nu_k + w^{t+1}\beta_{k}^t} $$
```
def compute_M_tP1_T_0t_Tjkt(t,temps0,tempsT,M_k_ttp1,Tables,w_t,beta_tmoins1_k,gamma_t,nu,K):
    """Sample, for time t, the metatable counts of every topic k.

    Returns (M_3, Tau) where M_3[k] = [M_k^{t->t+1}, M_k^{0->t+1}, M_k^t] and
    Tau[k] is the customer count of the top-level CRP: the new tables
    T^{0->t+1} summed over corpora plus (except at t == T-1) the metatables
    flowing back from t+1 via M_k_ttp1.

    temps0 / tempsT mirror compute_T_tP1_T_0t_Tjkt: at t == 0 the base
    measure is gamma^t * nu_k alone; at t == T-1 M_k_ttp1 is ignored.
    """
    M_3=[]
    Tau=[]
    if(temps0):
        for k in range(K):
            Tau_t_k=np.sum(np.array(Tables)[t,:,k,1])+M_k_ttp1[k]
            Tau.append(Tau_t_k)
            # first time step: base measure is nu alone
            param_CRP=(gamma_t*nu[k])
            M_tk=chinese_restaurant_process(Tau_t_k,param_CRP)
            M_3.append([0,M_tk,M_tk])
    elif(tempsT):
        for k in range(K):
            # last time step: no metatables flow back from t+1
            Tau_t_k=np.sum(np.array(Tables)[t,:,k,1])
            Tau.append(Tau_t_k)
            # mixed base measure: gamma * (w * beta^{t-1} + (1 - w) * nu)
            param_CRP=(gamma_t*w_t*beta_tmoins1_k[k])+(gamma_t*(1-w_t)*nu[k])
            M_tk=chinese_restaurant_process(Tau_t_k,param_CRP)
            M_jk_t_tplus1,M_jk_0_tplus1=compute_M_jk_t_tplus1_et_M_jk_0_tplus1_multinomiale(M_tk,w_t,beta_tmoins1_k[k],nu[k])
            M_3.append([M_jk_t_tplus1,M_jk_0_tplus1,M_tk])
    else:
        for k in range(K):
            Tau_t_k=np.sum(np.array(Tables)[t,:,k,1])+M_k_ttp1[k]
            Tau.append(Tau_t_k)
            param_CRP=(gamma_t*w_t*beta_tmoins1_k[k])+(gamma_t*(1-w_t)*nu[k])
            M_tk=chinese_restaurant_process(Tau_t_k,param_CRP)
            M_jk_t_tplus1,M_jk_0_tplus1=compute_M_jk_t_tplus1_et_M_jk_0_tplus1_multinomiale(M_tk,w_t,beta_tmoins1_k[k],nu[k])
            M_3.append([M_jk_t_tplus1,M_jk_0_tplus1,M_tk])
    return(M_3,Tau)
def compute_Tables_Metatables(T,J,v,w,pi,beta,alpha,gamma,nu,n,K):
    """Backward pass (t = T-1 down to 0) sampling all tables and metatables.

    Tables flow backwards in time: the counts drawn at t+1 feed the customer
    counts at t, hence the reversed loop. Both `Table` and `Nu` are built in
    reversed order and flipped at the end, which is why the already-processed
    time t+1 lives at index T-t-2 during the loop.

    Returns (Table, MetaTable, Tau, Nu), each indexed by true time t ascending:
    - Table[t][j][k] = [T_jk^{t->t+1}, T_jk^{0->t+1}, T_jk^t]
    - MetaTable[t][k] = [M_k^{t->t+1}, M_k^{0->t+1}, M_k^t]
    - Tau[t][k], Nu[t][j][k]: the corresponding CRP customer counts.
    """
    Table=[]
    MetaTable=[]
    Nu=[]
    for t in range(T-1,-1,-1):
        T_t=[]
        Nu_t=[]
        temps_info=t
        if (temps_info==T-1):
            # last time step first: no incoming tables from t+1 (third arg 0)
            for j in range(J):
                T_tj,Nu_t_j=compute_T_tP1_T_0t_Tjkt(0,1,0,n[t][j],v[t],pi[t-1][j],beta[t],alpha[t])
                T_t.append(T_tj)
                Nu_t.append(Nu_t_j)
        elif(temps_info==0):
            # first time step: no previous pi (passed as 0); tables from t+1
            # are the first entry [...,0] of the previously appended slice
            for j in range(J):
                T_tj,Nu_t_j=compute_T_tP1_T_0t_Tjkt(1,0,np.array(Table)[T-t-2,j,:,0],n[t][j],v[t],0,beta[t],alpha[t])
                T_t.append(T_tj)
                Nu_t.append(Nu_t_j)
        else:
            for j in range(J):
                T_tj,Nu_t_j=compute_T_tP1_T_0t_Tjkt(0,0,np.array(Table)[T-t-2,j,:,0],n[t][j],v[t],pi[t-1][j],beta[t],alpha[t])
                Nu_t.append(Nu_t_j)
                T_t.append(T_tj)
        Table.append(T_t)
        Nu.append(Nu_t)
    # restore ascending time order before the metatable pass, which indexes
    # Table by true time t
    Nu=Nu[::-1]
    Table=Table[::-1]
    Tau=[]
    for t in range(T-1,-1,-1):
        temps_info=t
        if (temps_info==T-1):
            M_t,Tau_t=compute_M_tP1_T_0t_Tjkt(t,0,1,0,Table,w[t],beta[t-1],gamma[t],nu,K)
            MetaTable.append(M_t)
            Tau.append(Tau_t)
        elif(temps_info==0):
            # MetaTable is still in reversed build order here, so time t+1
            # sits at index T-2-t; entry [...,0] is M^{t+1 -> t+2}... i.e. the
            # metatables flowing back into t
            M_t,Tau_t=compute_M_tP1_T_0t_Tjkt(t,1,0,np.array(MetaTable)[T-2-t,:,0],Table,w[t],0,gamma[t],nu,K)
            MetaTable.append(M_t)
            Tau.append(Tau_t)
        else:
            M_t,Tau_t=compute_M_tP1_T_0t_Tjkt(t,0,0,np.array(MetaTable)[T-2-t,:,0],Table,w[t],beta[t-1],gamma[t],nu,K)
            MetaTable.append(M_t)
            Tau.append(Tau_t)
    Tau=Tau[::-1]
    MetaTable=MetaTable[::-1]
    return(Table,MetaTable,Tau,Nu)
#Tables,MetaTable,Tau,Nu=compute_Tables_Metatables(T,J,v,w,pi,beta,alpha,gamma,nu,N,len(beta[1]))
```
# Sampling $\nu$
Une fois les Tables et Metatables calculés, on va réapproximer les poids. <br/> <br/> $$M_k = \sum_t M_{k}^t$$
<br/> $$M = \sum_k M_{k}$$
$$G/\xi,H,( M_k )_{k=1}^{K} \sim DP(\xi+M,\frac{H+\sum_{k=1}^K M_k \delta_{\phi_k}}{\xi + M})$$
où K est le nombre de plats distincts sur toutes les métatables. On peut représenter G de la façon suivante :
$$ G = \sum_{k=1}^K \nu_k \delta_{\phi_k} + \nu_u G_u$$$$ G_u\sim DP(\xi,H) $$$$ \nu=(\nu_1,...,\nu_K,\nu_u)\sim Dirichlet(M_1,...,M_K,\xi)$$ <br\>On simule donc $\nu$ selon une loi de dirichlet de paramètres $M_1,...,M_k,M_u$
**Calcul de $\beta$ ** <br\>
Une fois qu'on a réduit les dimensions de nos objets et conservé seulement les topics intéressants, on sample $\beta^t$ selon 14 <br\>
$$(\beta_u^t,\beta_1^t,...,\beta_K^t)\sim Dirichlet(\tilde{\gamma^t}.(\tilde{\beta_u^t},\tilde{\beta_1^t},...,\tilde{\beta_K^t}))$$ avec
$$\tilde{\gamma^t}=\gamma^t+ TAU^t_.$$ et
$$ \tilde{\beta_k^t} = \frac{1}{\tilde{\gamma^t}}(\gamma^t w^t \beta_k^{t-1} + \gamma^t(1 - w^t)\nu_k+ TAU^t_k)$$
<br\>
et
<br\>
$$ \tilde{\beta_u^t} = \frac{1}{\tilde{\gamma^t}}(\gamma^t w^t \beta_u^{t-1} + \gamma^t(1 - w^t)\nu_u)$$
Tau et Nu sont calculés plus haut, on s'en sert pour calculer les tildes <br\> Les fonctions suivantes calculent respectivement $\tilde{\gamma}$ , $\tilde{\beta}$ et $\beta$
```
def compute_gamma_tilde(gamma, Tau):
    """gamma~^t = gamma^t + sum_k Tau^t_k, computed for every time step at once."""
    tau_totals = np.sum(np.array(Tau), axis=1)
    return gamma + tau_totals
#gamma_tilde=compute_gamma_tilde(gamma,Tau)
def compute_beta_t_tilde(t, gamma_t, gamma_tilde_t, w_t, beta_tmoins1, nu, tau_t):
    """Posterior Dirichlet mean beta~^t (eq. 14): one entry per topic k plus a
    final entry for the unseen-topic mass u.

    At t == 0 there is no previous beta, so only nu and tau contribute;
    w_t and beta_tmoins1 are then ignored. A non-positive gamma~^t yields zeros.
    """
    K = len(nu) - 1
    out = []
    if t != 0:
        for k in range(K):
            if gamma_tilde_t > 0:
                masse = (gamma_t * w_t * beta_tmoins1[k]) + (gamma_t * (1 - w_t) * nu[k] + tau_t[k])
                out.append((1 / gamma_tilde_t) * masse)
            else:
                out.append(0)
        if gamma_tilde_t > 0:
            masse_u = (gamma_t * w_t * beta_tmoins1[K]) + gamma_t * (1 - w_t) * nu[K]
            out.append((1 / gamma_tilde_t) * masse_u)
        else:
            out.append(0)
    else:
        for k in range(K):
            out.append((1 / gamma_tilde_t) * (gamma_t * nu[k] + tau_t[k]) if gamma_tilde_t else 0)
        out.append((1 / gamma_tilde_t) * (gamma_t * nu[K]) if gamma_tilde_t else 0)
    return out
def compute_new_beta(gamma, w, nu, tau):
    """Resample beta^t for every time step from Dirichlet(gamma~^t * beta~^t),
    chaining each beta^t on the freshly drawn beta^{t-1} (eq. 14)."""
    gamma_tilde = compute_gamma_tilde(gamma, tau)
    beta_new = []
    for t, g_tilde in enumerate(gamma_tilde):
        prev_beta = beta_new[t - 1] if t != 0 else None
        beta_t_tilde = compute_beta_t_tilde(t, gamma[t], g_tilde, w[t], prev_beta, nu, tau[t])
        beta_t = dirichlet_generate_random(g_tilde * np.array(beta_t_tilde))
        beta_new.append(beta_t.tolist())
    return beta_new
#new_beta=compute_new_beta(gamma,w,nu,Tau)
```
**Calcul de $\pi$ ** <br\>
De même que pour $\beta$, on calcule $\pi$ de la façon suivante : <br\>
$$(\pi_{ju}^t,\pi_{j1}^t,...,\pi_{jK}^t)\sim Dirichlet(\tilde{\alpha_{0j}^t}.(\tilde{\pi_{ju}^t},\tilde{\pi_{j1}^t},...,\tilde{\pi_{jK}^t}))$$ avec
$$\tilde{\alpha}_{0j}^t=\alpha_0^t+ N^t_{j.}$$ et
$$ \tilde{\pi_{jk}^t} = \frac{1}{\tilde{\alpha_0^t}}(\alpha_0^t v^t \pi_{jk}^{t-1} + \alpha_0^t(1 - v^t)\beta_k^t+ N^t_{jk})$$
<br\>
et
<br\>
$$ \tilde{\pi_{ju}^t} = \frac{1}{\tilde{\alpha_0^t}}(\alpha_0^t v^t \pi_{ju}^{t-1} + \alpha_0^t(1 - v^t)\beta_u^t)$$
```
def compute_alpha_tilde(alpha, Nu):
    """alpha~^t_j = alpha^t + sum_k Nu^t_{jk}, returned as a T x J nested list."""
    Nu_arr = np.array(Nu)
    n_corpora = len(Nu[0])
    per_corpus = [alpha + np.sum(Nu_arr[:, j, :], axis=1) for j in range(n_corpora)]
    return np.transpose(np.array(per_corpus)).tolist()
#alpha_tilde=compute_alpha_tilde(alpha,Nu)
def compute_pi_t_j_tilde(t, j, alpha_0_t, alpha_0_t_tilde, v_t, pi_tmoins1_j, beta_t, Nu_t_j):
    """Posterior Dirichlet mean pi~^t_j: one entry per topic k plus a final
    entry for the unseen-topic mass u.

    At t == 0 there is no previous pi, so the prior mass comes from beta alone
    (v_t and pi_tmoins1_j are ignored). The j argument is unused and kept only
    for interface compatibility. A non-positive alpha~ yields zeros.
    """
    K = len(beta_t) - 1
    pi_t_j_tilde = []
    for k in range(K):
        if alpha_0_t_tilde > 0:
            if t != 0:
                masse = alpha_0_t * v_t[k] * pi_tmoins1_j[k] + alpha_0_t * (1 - v_t[k]) * beta_t[k] + Nu_t_j[k]
            else:
                masse = alpha_0_t * beta_t[k] + Nu_t_j[k]
            pi_t_j_tilde.append((1 / alpha_0_t_tilde) * masse)
        else:
            pi_t_j_tilde.append(0)
    # unseen-topic component: only beta's last entry contributes
    pi_t_j_tilde_u = (1 / alpha_0_t_tilde) * (alpha_0_t * beta_t[K]) if alpha_0_t_tilde > 0 else 0
    pi_t_j_tilde.append(pi_t_j_tilde_u)
    return pi_t_j_tilde
def compute_new_pi(alpha_0, v, beta, gamma, w, Nu):
    """Resample pi^t_j for every time step and corpus from
    Dirichlet(alpha~^t_j * pi~^t_j), chaining on the freshly drawn pi^{t-1}.

    Note: gamma and w are unused; they are kept for interface compatibility.
    """
    alpha_tilde = compute_alpha_tilde(alpha_0, Nu)
    n_corpora = len(Nu[0])
    pi_new = []
    for t in range(len(alpha_0)):
        pi_new_t = []
        for j in range(n_corpora):
            prev = pi_new[t - 1][j] if t != 0 else None
            tilde = compute_pi_t_j_tilde(t, j, alpha_0[t], alpha_tilde[t][j], v[t], prev, beta[t], Nu[t][j])
            draw = dirichlet_generate_random(alpha_tilde[t][j] * np.array(tilde))
            pi_new_t.append(list(draw))
        pi_new.append(pi_new_t)
    return pi_new
```
Si on a plusieurs topics présents par corpus, cette fonction en extrait les plus fréquents. <br\> Cette fonction permet de vérifier nos résultats
```
def get_best_topic_from_pi(K,N,average_phi,name_word,nom_test,boucle_number,param_w,k):
    """Log the most frequent topics per (time, corpus) and build summary frames.

    Appends (mode "a") a human-readable report to
    "K_{k}_w_{param_w}_test_{nom_test}.txt" and returns (df, df_topic):
    - df: one column per time step; each cell is a "topic(count)," string per corpus;
    - df_topic: distinct top topics with their first word-probability
      (only filled in the 'synthetic_data' mode).

    Side effects: file I/O only. Note: the local name `file` shadows a builtin
    and `bestAll` is never used; both kept unchanged here.
    """
    bestAll=[]
    file = open("K_{}_w_{}_test_{}.txt".format(k,param_w,nom_test),"a")
    file.write("\n\n\n___________Boucle{}___________\n\n".format(boucle_number))
    file.write("\n\n----- {} topics en tout -----:".format(K))
    df=pd.DataFrame()
    topic=[]
    prob=[]
    for t in range(len(N)):
        list_info_t=[]
        file.write("\n\n----- Temps {} -----:".format(t))
        for j in range(len(N[t])):
            file.write("\nCorpus {}:\n\n".format(j))
            # topic indices sorted by decreasing document count
            best=np.argsort(-np.array(N[t][j]))
            list_info_t_j=''
            # report at most the top 4 non-empty topics
            for i in range(min(4,len(best)-1)):
                if(N[t][j][best[i]] !=0):
                    file.write("\nTopic #{}={} with {} docs \n" .format(i,best[i],N[t][j][best[i]]))
                    list_info_t_j+='{}({}),'.format(best[i],N[t][j][best[i]])
                    if(nom_test=='small_doc'):
                        # also report the 5 highest-probability words of the topic
                        best_words=np.argsort(-np.array(average_phi[best[i]]))
                        for u in range(5):
                            file.write("Best words #{} = {} with p= {}\n".format(u,name_word[best_words[u]],average_phi[best[i]][best_words[u]]))
                    elif(nom_test=='synthetic_data'):
                        file.write("p1={},p2={}\n".format(average_phi[best[i]][0],average_phi[best[i]][1]))
                        if(best[i] not in topic):
                            topic.append(best[i])
                            prob.append(average_phi[best[i]][0])
            list_info_t.append(list_info_t_j)
        df['time_{}'.format(t)]=list_info_t
    d = {'#topic': topic, 'proba_1': prob}
    df_topic=pd.DataFrame(data=d)
    file.close()
    return(df,df_topic)
```
On **resample** chaque observation en suivant (20), (21) et l'information à posteriori donnée par (4.5). <br\>
En sortie, on a le topic le plus à même d'étre lié avec l'observation ainsi que la moyenne de chaque topic.
En effet chaque: $\phi_k \sim Dir(param)$ où param est calculé à posteriori. La moyenne de chaque r.v. nous informe sur le topic et nous permet de faire des comparaisons avec les résultats obtenus en Table 2 de l'article.
<br/> <br/>
On peut calculer $$ P(z_{ji}^t=k / x_{ji}^t)\sim P(z_{ji}^t=k/ \pi_j^t).P(x_{ji}^t/ z_{ji}^t=k...)$$
On sait que $$ P(z_{ji}^t=k/ \pi_j^t) = \pi_{jk}^t $$
De plus, $$ P(x_{ji}^t/ z_{ji}^t=k...) = \frac{\Gamma(n+1) \Gamma (\sum_{a\in A,w}^{W} X_{aw} +\alpha_w)
\prod_{w=1}^{W} \Gamma (\alpha_w + x_{jiw}^t+ \sum_{a\in A} X_{aw}) }{\Gamma (\sum_{a\in A,w}^{W} X_{aw} +\alpha_w + x_{jiw}^t) \prod_{w=1}^{W} [\Gamma ( x_{jiw}^t +1) \Gamma (\alpha_w + \sum_{a\in A} X_{aw}) ]} $$
Avec $A = ((i,j,t),Z_{ji}^t=k)$
<br/>
Après normalisation des $P(z_{ji}^t=k / x_{ji}^t)$, on selectionne un nouveau topic pour chaque document.
<br\> On retourne aussi la moyenne des $\phi_k \sim Dir(\alpha_1 + \sum_{a\in A} X_{a1},...,\alpha_W + \sum_{a\in A} X_{aW}) $
```
def get_new_z_i_j_t_egal_k(last_iteration,t,j,i,x_i_j_t,X,Z,pi_jt,W):
    """Gibbs-resample the topic of one document x_i_j_t (corpus j, time t).

    For each candidate topic k, the unnormalised posterior combines the corpus
    weight pi_jt[k] with the Dirichlet-multinomial predictive likelihood of the
    document given all other documents currently assigned to k (the document
    itself is removed from the counts first, via deep copies of Z and X).

    Returns (max_indice, average_phi):
    - max_indice: the new topic, drawn from the normalised posterior;
    - average_phi: posterior mean of each phi_k (filled only when
      last_iteration is truthy; empty sublists otherwise).

    NOTE: despite its name, log_proba holds plain (unnormalised) probabilities:
    the log-space sum is exponentiated with mpmath.exp before being stored.
    The accumulators initialised to 1 instead of 0 add a constant factor that
    is identical for every k and cancels out in the normalisation.
    """
    proba=[]
    log_proba=[]
    average_phi=[]
    # operate on copies so the caller's Z and X are left untouched
    Z_with_no_Xijt=copy.deepcopy(Z)
    X_with_no_Xijt=copy.deepcopy(X)
    del Z_with_no_Xijt[t][j][i]
    del X_with_no_Xijt[t][j][i]
    for k,pi_jtk in enumerate(pi_jt):
        average_phi_k=[]
        flat_Z=[item for y in Z_with_no_Xijt for x in y for item in x]
        flat_X=[item for y in X_with_no_Xijt for x in y for item in x]
        # word-count vectors of all documents currently assigned to topic k
        Z_tij_k=[doc for doc,topic in zip(flat_X,flat_Z) if (topic==k)]
        produit_numerateur=1
        produit_denominateur_1=1
        produit_denominateur_2=1
        a=np.sum(Z_tij_k)
        b=np.sum(x_i_j_t)
        for w in range(W):
            if(len(Z_tij_k)==0):
                c=0
            else:
                c=np.sum(Z_tij_k,axis=0)[w]
            if(last_iteration):
                # posterior mean of phi_k[w] under a symmetric Dir(1/W) prior
                average_phi_k.append(((1/W)+c)/(1+a))
            produit_numerateur+=gammaln(x_i_j_t[w]+(1/W)+c)
            produit_denominateur_1+=gammaln((1/W)+c)
            produit_denominateur_2+=gammaln(1+x_i_j_t[w])
        log_proba.append(pi_jt[k]*mpmath.exp((gammaln(len(x_i_j_t)+1)+gammaln(a+1)+produit_numerateur)-(produit_denominateur_2+produit_denominateur_1+gammaln(a+b+1))))
        average_phi.append(average_phi_k)
    # normalise and sample the new topic
    somme=sum(log_proba)
    for k,pi_jtk in enumerate(pi_jt):
        log_proba[k]=float(log_proba[k]/somme)
    max_indice=np.random.choice(len(pi_jt),1,p=log_proba)
    return(max_indice,average_phi)
```
On resample **toutes** les observations et on obtient les nouveaux N, qui sont les compteurs d'assignation aux topics
```
def get_new_Z(data,pi,Z,W,K):
    """Resample the topic of every document (sweeping times, corpora and
    documents in random order) and rebuild the per-topic counters N.

    last_iteration is raised only for the document with the maximal (t, j, i)
    indices, whenever the permutations happen to visit it, so that average_phi
    reflects the topic means at that point of the sweep.

    Returns (Z, N, average_phi); Z is also modified in place.
    """
    T=np.random.permutation(len(data))
    for t in T:
        J=np.random.permutation(len(data[t]))
        #print('Temps{}'.format(t))
        for j in J:
            I=np.random.permutation(len(data[t][j]))
            for i in I:
                #if(i%2==0):
                    #print(i)
                if(t==(len(data)-1) and j==(len(data[t])-1) and i==(len(data[t][j])-1)):
                    Z[t][j][i],average_phi=get_new_z_i_j_t_egal_k(1,t,j,i,data[t][j][i],data,Z,pi[t][j],W)
                else:
                    Z[t][j][i],unused_var=get_new_z_i_j_t_egal_k(0,t,j,i,data[t][j][i],data,Z,pi[t][j],W)
    # rebuild the assignment counters n_jk^t from the freshly sampled Z
    N=[]
    for t in range(len(data)):
        N_t=[]
        for j in range(len(data[t])):
            N_t.append(compute_n_t_j(K,Z[t][j]))
        N.append(N_t)
    return(Z,N,average_phi)
def sampling_xi(K, a, b, last_xi, M, nb_iter):
    """Resample the top-level concentration parameter xi via the Escobar & West
    auxiliary-variable scheme: draw eta ~ Beta(xi+1, M), then draw xi from a
    two-component gamma mixture whose shapes are (a+K) and (a+K-1).

    K : current number of active topics; M : total metatable count;
    (a, b) : gamma hyper-prior; nb_iter : number of inner refresh iterations.
    """
    new_xi = last_xi
    for i in range(nb_iter):
        eta = beta_generate_random([new_xi + 1, M])
        kai = a + K - 1
        bet = b - np.log(eta)
        # BUG FIX: the original condition was `kai/kai+(M*bet)`, which parses
        # as 1 + M*bet and therefore always selected the first mixture
        # component. The mixture weight is kai / (kai + M*bet).
        if random.uniform(0, 1) < kai / (kai + (M * bet)):
            kai += 1
        # NOTE(review): numpy's gamma takes a *scale* parameter while `bet` is
        # the posterior *rate* (so 1/bet may be intended); kept as original.
        new_xi = np.random.gamma(kai, bet)
    return new_xi
#new_xi=sampling_xi(10,10,1,12,26,20)
def sampling_alpha_t(t, alpha_a, alpha_b, last_alpha_t, tables, nb_iter, doc):
    """Resample the concentration parameter alpha^t from the table counts at
    time t (Teh et al. auxiliary-variable scheme, one auxiliary pair per corpus).

    tables[t][j][k] is [T^{t->t+1}, T^{0->t+1}, T^t_jk]; the sufficient
    statistic m is the total number of tables at time t, i.e. the sum over
    corpora j and topics k of entry [2].

    BUG FIX: the original computed `tables[t][:][:][2]` -- `[:]` is an
    identity copy, so that expression is just `tables[t][2]`, and the
    accompanying `type(...)==int` guard made it read a single cell. The full
    (J, K) slice of third entries is now summed.
    """
    J = len(tables[0])
    m = np.sum(np.array(tables[t])[:, :, 2])
    al = last_alpha_t
    for i in range(nb_iter):
        a = alpha_a + m
        b = alpha_b
        for j in range(J):
            w = beta_generate_random([al + 1, len(doc[j])])
            # renamed from `t`: the original shadowed the time-index parameter
            ratio = len(doc[j]) / al
            s = (random.uniform(0, 1) < (ratio / (ratio + 1)))
            a -= s
            b -= np.log(w)
        # NOTE(review): numpy's gamma takes a *scale* parameter while `b` is
        # the posterior *rate* (so 1/b may be intended); kept as original.
        al = np.random.gamma(a, b)
    return al
def sampling_gamma_t(t, gamma_a, gamma_b, last_gamma_t, metatables, nb_iter, nb_doc_temps_t):
    """Resample gamma^t from the metatable counts at time t (same
    auxiliary-variable scheme as sampling_alpha_t, with a single 'corpus' of
    size nb_doc_temps_t).

    BUG FIX: the original computed `metatables[t][:][2]`, which is just
    `metatables[t][2]` (one topic's triple). The sufficient statistic is the
    total metatable count at time t: the sum over topics k of entry [2] of
    metatables[t][k].
    """
    m = np.sum(np.array(metatables[t])[:, 2])
    al = last_gamma_t
    for i in range(nb_iter):
        a = gamma_a + m
        b = gamma_b
        w = beta_generate_random([al + 1, nb_doc_temps_t])
        # renamed from `t`: the original shadowed the time-index parameter
        ratio = nb_doc_temps_t / al
        s = (random.uniform(0, 1) < (ratio / (ratio + 1)))
        a -= s
        b -= np.log(w)
        # NOTE(review): numpy's gamma takes a *scale* parameter while `b` is
        # the posterior *rate* (so 1/b may be intended); kept as original.
        al = np.random.gamma(a, b)
    return al
```
# ALGORITHME
```
def algo_evo_hdp(synthetic_data,small_doc,GOT,max_iter,param_w,K):
    """Run the full EvoHDP cascaded Gibbs sampler on one of three datasets.

    Exactly one of the three boolean flags (synthetic_data, small_doc, GOT)
    selects the dataset; max_iter is the number of Gibbs sweeps; param_w
    initialises every v and w mixing weight; K is the initial topic count
    (pruned down to the active topics each iteration).

    Returns (df, df_topic) as produced by get_best_topic_from_pi.
    NOTE(review): df/df_topic are only bound inside the `(i+1) % 20 == 0`
    branch, so max_iter < 20 makes the final return raise a NameError.
    """
    #----Create Data----#
    k_init=K
    if(synthetic_data):
        nom_test='synthetic_data'
        # 4 time steps x 3 corpora, with 8 known 2-word topics to recover
        corpora_sizes=[[50,30,40],[51,32,43],[52,32,43],[53,34,45]]
        true_phi=np.zeros((8,2))
        true_phi[0]=[0.1,0.9]
        true_phi[1]=[0.2,0.8]
        true_phi[2]=[0.3,0.7]
        true_phi[3]=[0.4,0.6]
        true_phi[4]=[0.5,0.5]
        true_phi[5]=[0.6,0.4]
        true_phi[6]=[0.7,0.3]
        true_phi[7]=[0.8,0.2]
        T=4
        J=3
        W=2
        name_word=0
        info_data=local_components_and_corpora_sizes(T,J,corpora_sizes)
        data=generate_data_from_mixture_of_multinomials(T,J,info_data,true_phi)
    elif(small_doc):
        nom_test='small_doc'
        data,T,J,W,name_word=generate_easy_data(stop_words)
    elif(GOT):
        nom_test='GOT'
        data,name_doc,T,J,W,name_word=generate_GOT_data(stop_words)
    else:
        print("error")
    # documents per time step, used when resampling gamma^t
    nb_doc_par_temps=np.zeros((T))
    for t in range(T):
        for j in range(J):
            nb_doc_par_temps[t]+=len(data[t][j])
    #----Initialize Hyperparameters----#
    a_xi=10
    b_xi=1
    xi=np.random.gamma(a_xi,b_xi)
    a_gamma=10
    b_gamma=1
    a_alpha=10
    b_alpha=1
    gamma=[]
    gamma=[np.random.gamma(a_gamma,b_gamma) for i in range(T)]
    alpha=[]
    alpha=[np.random.gamma(a_alpha,b_alpha) for i in range(T)]
    v=T*[K*[param_w]]
    w=T*[param_w]
    #----Initialize parameters----#
    params_loi_H=0.5
    nu=stick_breaking(xi,K,W)
    beta=initialize_G_0_t(gamma,nu,T,K,w)
    pi=initialize_G_j_t(beta,alpha,J,T,K,v)
    Z,N=randomly_assign_Z_initialisation(T,J,K,data)
    #----Iterate Cascaded Gibbs Sampler----#
    for i in tqdm(range(max_iter)):
        boucle_number=i
        #print("++++++ITERATION++++++: {}".format(i))
        Tables,MetaTable,Tau,Nu=compute_Tables_Metatables(T,J,v,w,pi,beta,alpha,gamma,nu,N,K)
        # keep only topics that describe at least one document (M_k > 0)
        M_k=np.sum(np.array(MetaTable)[:,:,2],axis=0)
        M=np.sum(M_k)
        liste_indice=np.nonzero(M_k)
        M_k=M_k[M_k>0]
        param_dir=list(M_k)
        K=len(param_dir)
        ## sampling xi,gamma,alpha
        xi=sampling_xi(K,a_xi,b_xi,xi,M,20)
        new_gamma=[]
        new_alpha=[]
        for t in range(T):
            new_gamma.append(sampling_gamma_t(t,a_gamma,b_gamma,gamma[t],MetaTable,20,nb_doc_par_temps[t]))
            new_alpha.append(sampling_alpha_t(t,a_alpha,b_alpha,alpha[t],Tables,20,data[t]))
        gamma=new_gamma
        alpha=new_alpha
        # append a fresh topic (weight xi) for the next iteration
        param_dir.append(xi)
        nu=dirichlet_generate_random(param_dir)
        beta=compute_new_beta(gamma,w,nu,Tau)
        pi=compute_new_pi(alpha,v,beta,gamma,w,Nu)
        K=len(beta[0])
        v=T*[K*[param_w]]
        Z,N,average=get_new_Z(data,pi,Z,W,K)
        #----Print Mean of topic and N----#
        #print('---Beta=---:\n{}'.format(beta))
        #print('---Pi=---:\n{}'.format(pi))
        #print('---Average---=\n{}\n'.format(average))
        #print('---N---:\n{}'.format(N))
        # dump a progress report every 20 iterations
        if (0==(i+1)%20):
            df,df_topic=get_best_topic_from_pi(K,N,average,name_word,nom_test,boucle_number,param_w,k_init)
            df
            df_topic
    #print('---Tau---\n{}'.format(Tau))
    #print('---Nu---\n{}'.format(Nu))
    #print('---Tables---\n{}'.format(Tables))
    #print('---MetaTable---\n{}'.format(MetaTable))
    return(df,df_topic)
'''
k=[10,100,500]
max_iter=40
w=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
writer = pd.ExcelWriter('output..xlsx')
for init in range(len(k)):
    writer = pd.ExcelWriter('Output_k=_{}.xlsx'.format(k[init]))
    for i in range(len(w)):
        print("Iteration {}\{}".format(i+len(w)*init,len(w)*len(k)))
        df_nb_doc_topic,df_topic=algo_evo_hdp(1,0,0,max_iter,w[i],k[init])
        df_nb_doc_topic.to_excel(writer,sheet_name='k_{}_w_{}'.format(k[init],w[i]),startrow=0 , startcol=0)
        df_topic.to_excel(writer,sheet_name='k_{}_w_{}'.format(k[init],w[i]),startrow=5, startcol=0,index = False)
    writer.save()
'''
# The lines above must be uncommented to study the effect of the parameters on
# the synthetic-data analysis. In that case the model can take ~15 hours on a
# machine with 4 GB of RAM.
# For a quick test of the algorithm, uncomment the following line:
#df_nb_doc_topic,df_topic=algo_evo_hdp(1,0,0,100,0.8,50)
# The first 3 parameters of the algorithm:
# algo_evo_hdp(synthetic_data,small_doc,GOT,max_iter,param_w,K)
#(1,0,0) for the synthetic-data experiment
#(0,1,0) for the small_doc experiment
#(0,0,1) for the GOT experiment
# max_iter is the number of iterations
# param_w is the initial value of w, between 0 and 1; the larger w is, the more
# correlated the time steps and corpora are with each other
# K is the number of topics fixed at initialisation. The model treats K as
# infinite for the first iteration, so K should be relatively large.
# columns: the corpora
# rows: the time steps
# structure identical to the one in the article
# NOTE(review): df_nb_doc_topic and df_topic are only defined once one of the
# algo_evo_hdp calls above has been uncommented and run; otherwise the next
# two expressions raise a NameError.
df_nb_doc_topic
# The first column is the topic number;
# the second is its first coordinate.
df_topic
```
# Conclusion
L'algorithme est très lent car l'optimisation est faite par Gibbs Sampling. <br\><br\>
** Pour l'analyse sur les synthetic data** <br\>
Après un nombre assez faible d'itérations (environ 5), on retrouve les topics décrivant chacun des lots de données.
Les résultats peuvent s'avérer approximatifs selon les paramètres donnés et le comportement de l'algorithme sur un grand nombre d'itérations n'a pas été observé.
**Pour l'analyse d'un petit nombre de documents**<br\>
Les topics donnés sont cohérents mais les documents sont très basiques.
**Pour l'analyse Game Of Thrones**<br\>
On retrouve les noms des personnages principaux dans les topics. Cependant, les corpus de documents tendent à converger vers un seul topic, qui contient le nom de tous les personnages principaux présents du début à la fin de la série.
**Points à améliorer :** <br\>
- Chercher une base de documents plus intéressante pour étudier les résultats de l'algorithme
- Optimiser l'algorithme
- Etudier les approches AEVB, ADVI pour optimiser les paramètres du modèle.
- Etudier le "component collapsing" (Dinh & Dumoulin, 2016) où le modèle reste bloqué sur un minimum local avec des topics identiques (ce qui est le cas pour l'analyse Game Of Thrones)
- Etudier les articles proposés par Nadi, plus récents et combinant réseaux de neurones et topic modelling.
| github_jupyter |
## 1. Importing important price data
<p>Every time I go to the supermarket, my wallet weeps a little. But how expensive is food around the world? In this notebook, we'll explore time series of food prices in Rwanda from the <a href="https://data.humdata.org/dataset/wfp-food-prices">United Nations Humanitarian Data Exchange Global Food Price Database</a>. Agriculture makes up over 30% of Rwanda's economy, and over 60% of its export earnings (<a href="https://www.cia.gov/library/publications/the-world-factbook/geos/rw.html">CIA World Factbook</a>), so the price of food is very important to the livelihood of many Rwandans.</p>
<p>The map below shows the layout of Rwanda; it is split into five administrative regions. The central area around the Capital city, Kigali, is one region, and the others are North, East, South, and West.</p>
<p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_515/img/RwandaGeoProvinces.png" alt="A map of the five administrative regions of Rwanda"></p>
<p>In this notebook, we're going to import, manipulate, visualize and forecast Rwandan potato price data. We'll also wrap our analysis into functions to make it easy to analyze prices of other foods.</p>
```
# Load the readr and dplyr packages
library(readr)
library(dplyr)
# Import the potatoes dataset (read_csv returns a tibble)
potato_prices <- read_csv('datasets/Potatoes (Irish).csv')
# Take a glimpse at the contents (column names, types, first values)
glimpse(potato_prices)
```
## 2. Once more, with feeling
<p>Many of the columns in the potato data aren't very useful for our analysis. For example, the <code>adm1_name</code> column is always <code>"Rwanda"</code>, and <code>cur_name</code> is always <code>"RWF"</code>. (This is short for Rwandan Franc; for context, 1000 RWF is a little over 1 USD.) Similarly, we don't really need any of the ID columns or the data source.</p>
<p>Even the columns we do need have slightly obscure names. For example, <code>adm1_id</code> isn't as clear as <code>region</code>, and <code>mkt_name</code> isn't as clear as <code>market</code>. One of the most common types of data analysis disaster is to misunderstand what a variable means, so naming variables clearly is a useful way to avoid this. One trick is that any variable that includes a unit should include that unit in the variable name. Here, the prices are given in Rwandan Francs, so <code>price_rwf</code> is a good name.</p>
```
# Import again, only reading the columns we need
# ('c' = character, 'i' = integer, 'd' = double)
potato_prices <- read_csv("datasets/Potatoes (Irish).csv",
                          col_types = cols_only(adm1_name = 'c',
                                                mkt_name = 'c',
                                                cm_name = 'c',
                                                mp_month = 'i',
                                                mp_year = 'i',
                                                mp_price = 'd'))
# Rename the columns to be more informative.
# Consistency fix: use dplyr's rename (dplyr is loaded above) instead of
# plyr::rename — plyr is never loaded in this notebook, and the
# read_price_data() function defined later already uses the dplyr form.
potato_prices_renamed <- potato_prices %>%
  rename(region = adm1_name,
         market = mkt_name,
         commodity_kg = cm_name,
         month = mp_month,
         year = mp_year,
         price_rwf = mp_price)
# Check the result
potato_prices_renamed
```
## 3. Spring cleaning
<p>As is often the case in a data analysis, the data we are given isn't in quite the form we'd like it to be. For example, in the last task the month and year were given as integers. Since we'll be performing some time series analysis, it would be helpful if they were provided as dates. Before we can analyze the data, we need to spring clean it.</p>
```
# Load lubridate
library(lubridate)
# Convert year and month to a Date (day fixed to the 1st of the month),
# then drop the now-redundant year and month columns
potato_prices_cleaned <- potato_prices_renamed %>%
mutate(date=ymd(paste(year, month, '01'))) %>%
select(-year, -month)
# See the result
potato_prices_cleaned
```
## 4. Potatoes are not a balanced diet
<p>As versatile as potatoes are, with their ability to be boiled, roasted, mashed, fried, or chipped, the people of Rwanda have more varied culinary tastes. That means you are going to have to look at some other food types!</p>
<p>If we want to do a similar task many times, we could just cut and paste our code and change bits here and there. This is a terrible idea, since changing code in one place doesn't keep it up to date in the other places, and we quickly end up with lots of bugs.</p>
<p>A better idea is to write a function. That way we avoid cut and paste errors and can have more readable code.</p>
```
# Wrap the import/rename/clean pipeline into a reusable function.
# Given a commodity name, reads "datasets/<commodity>.csv" and returns a
# cleaned tibble with columns region, market, commodity_kg, price_rwf, date.
read_price_data <- function(commodity){
  file_name <- paste0('datasets/', commodity, '.csv')
  prices <- read_csv(file_name, col_types = cols_only(adm1_name = col_character(),
                                                      mkt_name = col_character(),
                                                      cm_name = col_character(),
                                                      mp_month = col_integer(),
                                                      mp_year = col_integer(),
                                                      mp_price = col_double()))
  prices_renamed <- prices %>% rename(region = adm1_name,
                                      market = mkt_name,
                                      commodity_kg = cm_name,
                                      month = mp_month,
                                      year = mp_year,
                                      price_rwf = mp_price)
  prices_cleaned <- prices_renamed %>%
    mutate(date = ymd(paste(year, month, "01"))) %>%
    select(-month, -year)
  # Return the cleaned data explicitly: the original relied on the invisible
  # value of the final assignment, which works but is easy to misread.
  prices_cleaned
}
# Test it
pea_prices <- read_price_data('Peas (fresh)')
glimpse(pea_prices)
```
## 5. Plotting the price of potatoes
<p>A great first step in any data analysis is to look at the data. In this case, we have some prices, and we have some dates, so the obvious thing to do is to see how those prices change over time.</p>
```
# Load ggplot2
library(ggplot2)
# Draw a line plot of price vs. date, one line per market
# (alpha = 0.2 keeps the many overlapping market lines readable)
ggplot(potato_prices_cleaned, aes(x=date, y=price_rwf, group=market)) +
geom_line(alpha=0.2) +
labs(title = "Potato price over time")
```
## 6. What a lotta plots
<p>There is a bit of a trend in the potato prices, with them increasing until 2013, after which they level off. More striking though is the seasonality: the prices are lowest around December and January, and have a peak around August. Some years also show a second peak around April or May.</p>
<p>Just as with the importing and cleaning code, if we want to make lots of similar plots, we need to wrap the plotting code into a function.</p>
```
# Reusable plotting helper: price history for one commodity, one
# semi-transparent line per market, titled "<commodity> price over time".
plot_price_vs_time <- function(prices, commodity){
  ggplot(prices, aes(x = date, y = price_rwf, group = market)) +
    geom_line(alpha = 0.2) +
    ggtitle(paste0(commodity, " price over time"))
}
# Try the function on the pea data
plot_price_vs_time(pea_prices, 'Pea')
```
## 7. Preparing to predict the future (part 1)
<p>While it's useful to see how the prices have changed in the past, what's more exciting is to forecast how they will change in the future. Before we get to that, there are some data preparation steps that need to be performed.</p>
<p>The datasets for each commodity are very rich: rather than being a single time series, they consist of a time series for each market. The fancy way of analyzing these is to treat them as a single hierarchical time series. The easier way, that we'll try here, is to take the average price across markets at each time and analyze the resulting single time series.</p>
<p>Looking at the plots from the potato and pea datasets, we can see that occasionally there is a big spike in the price. That probably indicates a logistic problem where that food wasn't easily available at a particular market, or the buyer looked like a tourist and got ripped off. The consequence of these outliers is that it is a bad idea to use the <em>mean</em> price of each time point: instead, the <em>median</em> makes more sense since it is robust against outliers.</p>
```
# Group by date, and calculate the median price across markets
# (median rather than mean: robust against the occasional price spikes
# visible in the plots above)
potato_prices_summarized <- potato_prices_cleaned %>%
group_by(date) %>%
summarize(median_price_rwf=median(price_rwf))
# See the result
potato_prices_summarized
```
## 8. Preparing to predict the future (part 2)
<p>Time series analysis in R is at a crossroads. The best and most mature tools for analysis are based around a time series data type called <code>ts</code>, which predates the tidyverse by several decades. That means that we have to do one more data preparation step before we can start forecasting: we need to convert our summarized dataset into a <code>ts</code> object.</p>
```
# Load magrittr
library(magrittr)
# Bounds of the observed period, used to anchor the ts object
max_date = max(potato_prices_summarized$date)
min_date = min(potato_prices_summarized$date)
# Extract a time series (frequency = 12 means monthly observations)
potato_time_series <- ts(potato_prices_summarized$median_price_rwf,
end=c(year(max_date), month(max_date)),
start=c(year(min_date), month(min_date)),
frequency=12)
# See the result
potato_time_series
```
## 9. Another day, another function to write
<p>Those data preparation steps were tricky! Wouldn't it be really nice if we never had to write them again? Well, if we wrap that code into a function, then we won't have to.</p>
```
# Reusable helper: collapse the per-market prices to a monthly median and
# convert the result to a monthly ts object anchored on the observed dates.
create_price_time_series <- function(prices){
  prices_summarized <- prices %>%
    group_by(date) %>%
    summarize(median_price_rwf = median(price_rwf))
  # with() exposes the summarized columns to ts() directly
  time_series <- with(prices_summarized,
                      ts(median_price_rwf,
                         start = c(year(min(date)), month(min(date))),
                         end = c(year(max(date)), month(max(date))),
                         frequency = 12))
}
# Try the function on the pea data
pea_time_series <- create_price_time_series(pea_prices)
pea_time_series
```
## 10. The future of potato prices
<p>All the preparation is done and we are ready to start forecasting. One question we might ask is "how do I know if I can trust our forecast?". Recall that both the potato and the pea data had strong seasonality (for example, potatoes were most expensive around August and cheapest around December). For agricultural data, a good forecast should show a similar shape throughout the seasons.</p>
<p>Now then, are we ready to see the future?</p>
```
# Load forecast
library(forecast)
# Forecast the potato time series (forecast() selects a model automatically)
potato_price_forecast <- forecast(potato_time_series)
# View it (point forecasts plus prediction intervals)
potato_price_forecast
# Plot the forecast
autoplot(potato_price_forecast, main='Potato price forecast')
```
## 11. The final function
<p>Nice! The forecast shows the spike in potato prices in late summer and the dip toward the end of the year.</p>
<p>With this analysis step, just as the previous steps, to make things repeatable, we need to wrap the code into a function.</p>
```
# Reusable forecasting helper: fit a forecast to the given time series and
# plot it, titled "<commodity> price forecast".
plot_price_forecast <- function(time_series, commodity){
  price_forecast <- forecast(time_series)
  plot_title <- paste0(commodity, " price forecast")
  autoplot(price_forecast, main = plot_title)
}
# Try the function on the pea data
plot_price_forecast(pea_time_series, 'Pea')
```
## 12. Do it all over again
<p>That was a lot of effort writing all that code to analyze the potato data. Fortunately, since we wrapped all the code into functions, we can easily take a look at any other food type.</p>
```
# Choose dry beans as the commodity
commodity <- "Beans (dry)"
# Read the price data
# (consistency fix: use the commodity variable declared above instead of
# repeating the string literal)
bean_prices <- read_price_data(commodity)
# Plot price vs. time
plot_price_vs_time(bean_prices, 'Bean')
# Create a price time series
bean_time_series <- create_price_time_series(bean_prices)
# Plot the price forecast
plot_price_forecast(bean_time_series, 'Bean')
```
| github_jupyter |
```
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from decimal import *
import scipy.special
import scipy.stats
import scipy
import numpy
import math
import itertools
import sys
sys.version
```
# Out-of-band P2W attack evaluation
## P2W attack success probability within $ N $ total blocks (no attacker hash rate $\alpha$ )
* $\omega$ This denotes the hash rate of all bribed miners (or the hashrate of the attacker if applicable)
* $\beta = 1 -
\omega$ The hash rate of the nodes that keep on mining on the main chain i.e., Bob if all others switch
* $k$ The number of confirmation blocks required to catch up
* $N$ The total number of blocks that can be rewarded
A new block is appended to the *main chain* with probability:
$$
\dfrac{\beta}{\omega+\beta} = \beta
$$
A new block is appended to the *attack chain* with probability:
$$
\dfrac{\omega}{\omega+\beta} = \omega
$$
All successful series of blocks up to the maximum number of blocks $ N - n $ are given by:
$$
\sum_{i=0}^{i \le N - k -1}
{
\left(\binom{k+2i}{i} - \binom{k+2i}{i-1}\right)
\cdot
{ \omega }^{k+1+i}
\cdot
{ \beta }^{i}
}
\,\,.
$$
```
# Probability of the attack catching-up k blocks within N total blocks
def attack_prob(k, N, omega, beta=None):
    """Success probability of catching up k blocks within at most N blocks.

    Sums, over i = 0 .. N-k-1 (the number of main-chain blocks mined while
    the attack still succeeds), the number of valid race paths — the
    ballot-style count binom(k+2i, i) - binom(k+2i, i-1) — weighted by the
    probability omega**(k+1+i) * beta**i of that series of blocks.
    beta defaults to 1 - omega.
    """
    if beta is None:
        beta = 1 - omega
    total = 0
    for step in range(0, N - k):
        paths = scipy.special.binom(k + 2 * step, step) - scipy.special.binom(k + 2 * step, step - 1)
        total += paths * omega ** (k + step + 1) * beta ** step
    return total
# Sanity checks against precomputed reference values, then example evaluations
assert math.isclose(attack_prob(2,3,0.8,0.2),0.512)
assert math.isclose(attack_prob(2,4,0.8,0.2),0.7577600000000001)
assert math.isclose(attack_prob(2,5,0.8,0.2),0.8757248000000003)
assert math.isclose(attack_prob(2,6,0.8,0.2),0.9344450560000004)
assert math.isclose(attack_prob(2,7,0.8,0.2),0.9646440448000003)
attack_prob(2,3,0.8,0.2)
attack_prob(1,200,0.1)
# With N = k the sum is empty, so the probability is 0; N = k+1 gives the first term
attack_prob(k=6,N=6,omega=0.670)
attack_prob(k=6,N=7,omega=0.670)
# Get number of required total blocks s.t. attack success probability is met
def attack_N(k, omega, success_prob):
    """Return the smallest N with attack_prob(k, N, omega) >= success_prob."""
    candidate = 0
    current = 0
    while current < success_prob:
        current = attack_prob(k, candidate, omega, beta=1 - omega)
        candidate += 1
    # the loop increments once past the N that reached the target, hence -1
    return candidate - 1
# Required N for a 99.5% success target at various rational hashrates
attack_N(k=6,omega=0.532,success_prob=0.995)
attack_N(k=6,omega=0.67,success_prob=0.995)
# NOTE(review): success_prob=1 only terminates if the float sum actually
# reaches 1.0 — confirm this holds for these parameters.
attack_N(k=6,omega=0.67,success_prob=1)
# Estimate the expected number of blocks until the attack succeeds, given that
# the target success probability is reached within N total blocks
def attack_expected_estimate(k, N, omega, beta=None):
    """Expected number of total blocks elapsed at attack success.

    Accumulates term * blocks_elapsed over every successful series length,
    where term is the same path-counting probability used in attack_prob.
    Bails out early (printing the partial sums) if a term overflows to NaN.
    """
    if beta is None:
        beta = 1 - omega
    cumulative = 0
    expectation = 0
    for i in range(0, N - k):
        term = ( scipy.special.binom(k+2*i, i) - scipy.special.binom(k+2*i, i-1) ) * omega**(k+i+1) * beta**i
        # Overall target success probability accumulated so far:
        cumulative += term
        # Real number of blocks elapsed when the attack succeeds after i
        # main-chain blocks: the k+1 attack blocks plus those i blocks.
        blocks_elapsed = i + k + 1
        if math.isnan(term):
            print("p isnan P=",cumulative," e=",expectation)
            return expectation
        expectation += term * blocks_elapsed
    return expectation
# Expected block counts (rounded up) for the whale-attack hashrate scenarios
math.ceil(attack_expected_estimate(k=6,N=41,omega=0.670))
math.ceil(attack_expected_estimate(k=6,N=265,omega=0.670))
math.ceil(attack_expected_estimate(k=6,N=500,omega=0.670))
math.ceil(attack_expected_estimate(k=6,N=500,omega=0.764))
math.ceil(attack_expected_estimate(k=6,N=500,omega=0.828))
math.ceil(attack_expected_estimate(k=6,N=500,omega=0.887))
# NOTE(review): alias 'py' appears unused below; the simulation code relies on
# the earlier 'import numpy as np' — confirm and consider removing.
import numpy as py
def simulate_race(p_omega=0.670,  # rational (bribable) hashrate
                  k_v=6,          # initial main-chain lead to overcome
                  runs=1):
    """Monte-Carlo race: repeatedly simulate the attack chain trying to
    overtake a k_v-block main-chain lead and tally blocks mined.

    Returns a dict with total main-chain blocks ("mblk"), attack-chain
    blocks ("ablk") and overall blocks ("N") across all runs.
    """
    p_beta = 1 - p_omega
    tally = {"mblk": 0,
             "ablk": 0,
             "N": 0}
    for _ in range(runs):
        attack_len = 0
        main_len = k_v
        # race until the attack chain is strictly longer
        while attack_len <= main_len:
            values = np.random.choice([0,1],1,p=[p_beta,p_omega])
            for outcome in values:
                if outcome == 0:
                    main_len += 1
                    tally["mblk"] += 1
                else:
                    attack_len += 1
                    tally["ablk"] += 1
                tally["N"] += 1
    return tally
# Average total blocks per run, timed with the IPython %time magic
# (these lines only run inside a Jupyter/IPython session)
simulate_race()
runs=10**4
%time simulate_race(p_omega=0.532,runs=runs)["N"]/runs
runs=10**5
%time simulate_race(p_omega=0.670,runs=runs)["N"]/runs
runs=10**5
%time simulate_race(p_omega=0.764,runs=runs)["N"]/runs
runs=10**5
%time simulate_race(p_omega=0.828,runs=runs)["N"]/runs
runs=10**5
%time simulate_race(p_omega=0.887,runs=runs)["N"]/runs
runs=10**5
%time simulate_race(p_omega=0.999,runs=runs)["N"]/runs
def attack_expected(k, N, omega, beta=None, runs=10**3):
    """Simulated expected total blocks per attack, rounded up.

    k, N and beta are accepted for signature compatibility with
    attack_expected_estimate; the simulation itself only depends on
    omega and runs.
    """
    average_total = simulate_race(p_omega=omega, runs=runs)["N"] / runs
    return math.ceil(average_total)
attack_expected(k=6,N=265,omega=0.670)
```
### Compare with classical catch-up
General catch up condition $ a_z $ from:
* https://arxiv.org/pdf/1402.2009.pdf
* https://bitcoin.org/bitcoin.pdf
Classical parameter names:
* $p$ = hashrate of the honest network
* $q$ = hashrate of the attacker
$$
p + q = 1
$$
* $n$ = number of confirmation blocks, including and on top of the block which includes the respective transaction which needs to be confirmed or double-spend respectively
The probability to **ever** catch-up if $ z $ blocks behind, where one block is added to $z$ when the honest miners find a block and one block is subtracted from $ z $ when the attacker finds a block i.e.,
The catch-up probability is a simplification of the *recurrence relation*, where $ q = \alpha $ and $ p = \beta $, given as:
$$
\begin{align}
a_z &= p \cdot a_{z+1} + q \cdot a_{z-1} \\
&= \min(q/p,1)^{\max(z+1,0)}
\end{align}
$$
```
def catch_up(q, z):
    """Probability that an attacker with hashrate q ever catches up z blocks
    (closed form of the gambler's-ruin recurrence)."""
    if q < 0.5:
        honest = 1 - q
        return (q / honest) ** (z + 1)
    # with at least half of the hashrate the attacker eventually always wins
    return 1.0
```
With $ q \geq 0.5 $ of hashpower the attacker will always catch up:
```
# q = 0.5 is the boundary case: the attacker always eventually catches up
assert catch_up(0.5,100)*100 == 100.0
catch_up(0.5,100)*100
```
With $ q < 0.5 $ of hashpower the probability drops exponentially with $ z $:
```
# Exponential decay in z just below the 50% threshold
# (z=12.5 works too: the closed form is a real power, z need not be integral)
catch_up(0.49,6)*100
catch_up(0.49,12.5)*100
catch_up(0.49,25)*100
catch_up(0.49,50)*100
catch_up(0.49,100)*100
```
#### Comparison
```
# Probability to catch up 1 block i.e., 1 block behind in unlimited time/blocks
catch_up(q=0.33,z=1)*100
# Probability to catch up 1 block i.e., 1 block behind in (large) limited time/blocks
attack_prob(1,100,0.33)*100
# Probability to catch up 1 block i.e., 1 block behind in unlimited time/blocks
catch_up(q=0.66,z=1)*100
# Probability to catch up 1 block i.e., 1 block behind in limited time/blocks i.e., 12
attack_prob(1,12,0.66)*100
```
### Comparison of catching-up within $ N $ total blocks vs catching-up after $ \infty $ blocks
```
def fig_prob_to_catch_up():
"""Plot P(catch up 1 block within N blocks) for several omega values,
with each omega's unlimited-time catch_up probability as a dashed asymptote.
Saves the figure to plots/ (the directory must exist)."""
fig, ax = plt.subplots(figsize=(16, 9))
# https://matplotlib.org/api/markers_api.html
marker = itertools.cycle(('o','v', '+', 's','.', ',', '*','1','D','x','^'))
omega = [ 0.1, 0.2, 0.33, 0.4, 0.5, 0.66 ]
N = np.arange(0,33)
color = itertools.cycle(( 'b','orange','c','r', 'g','darkviolet',))
"""
NUM_COLORS = len(omega)
cm = plt.get_cmap('gist_rainbow')
colors = [cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)]
color = iter(colors)
#ax.set_color_cycle([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
"""
# one curve per omega (reversed so the legend matches the visual stacking);
# the dashed horizontal line is the unlimited-time limit catch_up(o, 1)
for o in reversed(omega):
current_color=next(color)
plt.plot(N, [ attack_prob(1,n,o)*100 for n in N ] , color=current_color, marker=next(marker), label=" $\omega$=" + str(o))
P = catch_up(o,1)*100
plt.plot([0, len(N)], [P, P], color=current_color, linestyle='--', linewidth=2)
plt.plot([2, 2], [-2, 100], color='k', linestyle='-', linewidth=4)
plt.plot([0, 100], [100, 100], color='k', linestyle='-', linewidth=4)
#plt.plot([0, 50], [50, 50], color='k', linestyle='--', linewidth=3)
# tidy up the figure
ax.grid(True)
#ax.legend(loc='center right', bbox_to_anchor=(0.8, 0.57))
ax.legend(loc='center right', bbox_to_anchor=(0.90, 0.30), framealpha=1, prop={'size': 24} )
#ax.set_title("Probability of catching-up 1 block after $ N $ total blocks")
ax.set_ylabel("Probability of catching-up in $ \% $ ", fontsize=28)
ax.set_xlabel("Number of blocks till catching up ($ N $)", fontsize=28)
ax.set_yscale('log'),
#ax.set_ylim([-1,81])
ax.set_yticks([1,5,10,25,50,75,100])
ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.set_xlim([0,len(N)-1])
# draw line at 0
#plt.plot([x1, x2], [y1, y2], color='k', linestyle='-', linewidth=2)
#plt.plot([0, 100], [0, 0], color='k', linestyle='--', linewidth=2)
#plt.yticks(np.arange(-1, 15, step=1))
plt.xticks(np.arange(0, len(N), step=2))
#plt.yscale('log')
plt.rcParams.update({'font.size': 23})
plt.rc('xtick', labelsize=24)
plt.rc('ytick', labelsize=24)
plt.savefig("plots/number_of_blocks_for_catching-up_z=1.png", bbox_inches='tight',dpi=100)
plt.show()
fig_prob_to_catch_up()
```
### P2W attack success probability plot
```
def fig_probability_success_after_at_most_N_steps():
"""Plot attack_prob(6, N, omega) over N for a sweep of omega values.
Saves the figure to plots/ (the directory must exist)."""
fig, ax = plt.subplots(figsize=(16, 9))
# https://matplotlib.org/api/markers_api.html
marker = itertools.cycle((',','v', '+', 's','.', 'o', '*','1','D','x','^'))
x = np.arange(0, 36)
for omega in [0.33, 0.40, 0.49, 0.51, 0.6, 0.66, 0.75, 0.90, 1 ]:
plt.plot(x, [ attack_prob(6, N, omega,beta=1-omega) for N in x ] , marker=next(marker), label=" ω=" + str(omega))
# tidy up the figure
ax.grid(True)
#ax.legend(loc='center right', bbox_to_anchor=(0.8, 0.57))
ax.legend(loc='center right', bbox_to_anchor=(0.185, 0.57), framealpha=1, prop={'size': 20})
#ax.set_title("Attack success probability")
ax.set_xlabel("Number of steps/blocks (N) that the attack is funded", fontsize=28)
ax.set_ylabel("Attack success probability",fontsize=28)
ax.set_ylim([1e-10, 1])
ax.set_xlim([0,35])
#plt.yscale('log')
plt.rcParams.update({'font.size': 17})
plt.rc('xtick', labelsize=17)
plt.rc('ytick', labelsize=17)
plt.savefig("plots/attack_probability_no-hashrate_n=6_N=35.png", bbox_inches='tight', dpi=100) # sage image
plt.show()
fig_probability_success_after_at_most_N_steps()
# This figure compares against the probabilities mentioned in the whale attack paper
# https://www.cs.umd.edu/~jkatz/papers/whale-txs.pdf
# NOTE: this redefines the function of the same name from the previous cell
# (different omega sweep and axis limits) — intentional notebook pattern.
def fig_probability_success_after_at_most_N_steps():
fig, ax = plt.subplots(figsize=(16, 9))
# https://matplotlib.org/api/markers_api.html
marker = itertools.cycle((',','v', '+', 's','.', 'o', '*','1','D','x','^'))
x = np.arange(0, 64)
for omega in [0.532, 0.670, 0.764, 0.828, 0.887, 0.931, 0.968, 0.999 ]:
plt.plot(x, [ attack_prob(6, N, omega,beta=1-omega) for N in x ] , marker=next(marker), label=" ω=" + "{:.3f}".format(omega))
# tidy up the figure
ax.grid(True)
#ax.legend(loc='center right', bbox_to_anchor=(0.8, 0.57))
ax.legend(loc='center right', bbox_to_anchor=(0.80, 0.30))
#ax.set_title("Attack success probability")
ax.set_xlabel("Max steps (N) that the attack can be funded")
ax.set_ylabel("Attack success probability")
ax.set_ylim([1e-10, 1])
ax.set_xlim([0,63])
#plt.yscale('log')
plt.rcParams.update({'font.size': 17})
plt.rc('xtick', labelsize=17)
plt.rc('ytick', labelsize=17)
plt.savefig("plots/attack_probability_no-hashrate_n=6_N=60.png", dpi=100) # sage image
plt.show()
fig_probability_success_after_at_most_N_steps()
```
## Costs of the attack
Transaction exclusion and ordering attack costs for $ \omega = 1 $:
```
# Cost model for transaction exclusion/ordering with omega = 1
N = 6 # duration of the attack
#reward = 12.5 # block reward
reward = 6.25 # block reward
fee = 2 # average fees per block (see other notebook)
r_b = reward + fee
epsilon = 1 # bribe
c_operational = 0.5 # operational costs
f_B = N * ( r_b + epsilon ) + c_operational # required funds of the attacker which are paid in Ethereum
c_fail = N * r_b + c_operational
c_success = N * epsilon + c_operational
print("N = ",N)
print("c_operational = ",c_operational)
print("r_b = ",r_b)
print("epsilon = ",epsilon)
print("f_B = ",f_B)
print("c_fail = ",c_fail)
print("c_success = ",c_success)
```
Transaction revision, exclusion and ordering attack costs for $ \omega = 1 $:
https://en.bitcoin.it/wiki/Controlled_supply
```
# Cost model for transaction revision (double-spend) with omega = 1,
# converted to USD at a fixed exchange rate
k = 6 # security parameter of victim
N = k+1 # minimum N
#reward = 12.5 # block reward
#fee = 1 # average fees rounded to 1 (see other notebook)
reward = 6.25 # block reward
#reward = 12.5
fee = 2
r_b = reward + fee
epsilon = 1 # bribe per block
# epsilon = 0.0002 # min. bribe in BTC (smallest transferable value w.r. to tx fees)
c_operational = 0.5 # operational costs
exchangerate = 13_077.50
f_B = k * r_b + N * ( r_b + epsilon ) + c_operational # required funds of the attacker which are paid in Ethereum
c_fail = N * r_b + c_operational
c_success = k * r_b + N * epsilon + c_operational
print("k = ",k)
print("N = ",N)
print("c_operational = ",c_operational)
print("r_b = ",r_b)
print("epsilon = ",epsilon)
print("f_B = ",f_B, " BTC")
print(" = ","{:,}".format(f_B*exchangerate), " USD")
print("c_fail = ",c_fail, " BTC")
print(" = ","{:,}".format(c_fail*exchangerate), " USD")
print("c_success = ",c_success, " BTC")
print(" = ","{:,}".format(c_success*exchangerate), " USD")
```
We compare against https://www.cs.umd.edu/~jkatz/papers/whale-txs.pdf
```
# costs of a whale attack at omega = 0.532 (rational hashrate fraction)
# taken from https://www.cs.umd.edu/~jkatz/papers/whale-txs.pdf
# printed once in full and once in scientific notation
print("{:.0f}".format(2.93e+23))
print("{:.2e}".format(2.93e+23))
```
### Big Table of P2W attack costs vs whale attack costs
```
# Emit a LaTeX table comparing P2W attack costs against the whale attack.
# NOTE(review): the header row declares 10 columns but the data row format
# emits 9 fields (no "% whale" value after the fail cost) — confirm intended.
bribe_per_block=1 # additional bribe epsilon
block_reward=6.25 # current block reward
#block_reward=12.5 # current block reward
#block_reward=25 # current block reward
block_fees_average=2 # current average fees per block
operational_costs=0.5
block_total= block_reward + block_fees_average
block_total_plus_bribe = block_total + bribe_per_block
success_prob = 0.9950
# success_prob = 0.99510916
print("""
\\begin{table}[]
\\centering
\\label{tab:costs}
\\begin{tabular}{c|c|c|c|c|c|c|c|c|c}
""")
print("$\omega$ \t& whale costs \t& \\makecell{p2w costs\\\\ fail} \t& \% whale \t& \\makecell{p2w costs\\\\ success} \t& \% whale \t& \\makecell{p2w costs\\\\ expected} & \\% whale & \\makecell{N\\\\ (w.c.)} & \makecell{N\\\\ (avg.)} \\\\")
print("\\hline")
# data from https://www.cs.umd.edu/~jkatz/papers/whale-txs.pdf
# (omega,k,BTC)
whale_omega_k_costs = [ (0.532,6,2.93e+23),
(0.670,6,999.79),
(0.764,6,768.09),
(0.828,6,1265.14),
(0.887,6,1205.00),
(0.931,6,1806.67),
(0.968,6,2178.58),
(0.999,6,2598.64) ]
# test values
"""
whale_omega_k_costs = [ (0.532,0,1),
(0.670,0,1),
(0.764,0,1),
(0.828,0,1),
(0.887,0,1),
(0.931,0,1),
(0.968,0,1),
(0.999,0,1),
(1.0,0,1)]
"""
# one table row per whale-paper scenario
for whale in whale_omega_k_costs:
N = 0
prob = 0
omega=whale[0]
k=whale[1]
whale_costs=whale[2]
N = attack_N(k,omega,success_prob)
# costs of failed attack
c_p2w_fail = N * block_total + operational_costs
# costs of successful p2w attack
c_p2w_succ = N*bribe_per_block + k*block_total + operational_costs
# calculated number of expected blocks
expected = math.ceil(attack_expected(k,N,omega,beta=1-omega))
c_p2w_exp = expected*bribe_per_block + k*block_total + operational_costs
"""
print("omega = {:.3f} prob = {:.3f} N = {:3.0f} c_p2w_succ = {:.8f} c_p2w_fail = {:.8f} bribe_per_block = {:2.0f}".format(omega,
prob,
N,
c_p2w_succ,
c_p2w_fail,
bribe_per_block))
"""
print("{:.3f} \t\t& {:.2f} \t& {:.0f} \t\t& {:.0f} \t\t& {:.2f} \t\t& {:2.0f} \t\t& {:.0f} \t\t& {:.2f} \t& {:2.0f} \\\\".format(omega,
whale_costs,
c_p2w_fail,
c_p2w_succ,
(c_p2w_succ/whale_costs)*100,
N,
c_p2w_exp,
(c_p2w_exp/whale_costs)*100,
expected ) )
N = 0
prob = 0
print("\end{tabular}")
print("\\caption{{Comparison of attack costs whale attack~\cite{{liao2017incentivizing}} for $ k_V = {:d} $, all costs given in BTC. For comparision different Bitcoin block reward epochs (12.5 and 6.25 BTC) are provided for our P2W attack, all with $ c_{{operational}} = {:.1f} $ BTC, and average fee per block of {:.0f} BTC and $ \epsilon = {:.0f} $ BTC.}}".format(k,operational_costs,block_fees_average,bribe_per_block))
print("\end{table}")
```
### Small Table of P2W attack costs vs whale attack costs
```
# Emit the condensed LaTeX comparison table (both block-reward epochs)
bribe_per_block=1 # additional bribe epsilon
block_fees_average=2 # current average fees per block
operational_costs=0.5 # operational costs of attack
success_prob = 0.9950 # success_prob = 0.99510916
# current block reward epoch
block_total_6= 6.25 + block_fees_average
block_total_6_plus_bribe = block_total_6 + bribe_per_block
# previous block reward epoch
block_total_12= 12.5 + block_fees_average
block_total_12_plus_bribe = block_total_12 + bribe_per_block
print("""
\\begin{table*}[]
\\centering
\\scriptsize
\\label{tab:costs}
\\begin{tabular}{c|c||c|c|c||c}
""")
print("""
\makecell{Rational \\\\ hashrate \\\\ $\\omega$} &
\makecell{Average whale attack costs \\\\ epoch reward $ 12.5 $ \\\\ $ c_{whale} $ in BTC} &
\makecell{P2W \\\\ epoch reward $ 12.5 $ \\\\ $ c_{expected} $ in BTC} &
\makecell{P2W cost \\\\ compared to whale } &
\makecell{P2W \\\\ $ N $ \\\\ average} &
\makecell{P2W \\\\ epoch reward $ 6.25 $ \\\\ $ c_{expected} $ in BTC} \\\\
""")
print("\\hline")
# data from https://www.cs.umd.edu/~jkatz/papers/whale-txs.pdf
# (omega,k,BTC)
whale_omega_k_costs = [ (0.532,6,2.93e+23),
(0.670,6,999.79),
(0.764,6,768.09),
(0.828,6,1265.14),
(0.887,6,1205.00),
(0.931,6,1806.67),
(0.968,6,2178.58),
(0.999,6,2598.64) ]
# one row per whale-paper scenario
for whale in whale_omega_k_costs:
N = 0
prob = 0
omega=whale[0]
k=whale[1]
whale_costs=whale[2]
# calculate number of required block to reach target success_probability (close to one)
N = attack_N(k,omega,success_prob)
# calculated number of expected blocks
expected = math.ceil(attack_expected(k,N,omega,beta=1-omega))
# costs for block reward epoch 12.5
c_p2w_exp_12 = expected*bribe_per_block + k*block_total_12 + operational_costs
# costs for block reward epoch 6.25
c_p2w_exp_6 = expected*bribe_per_block + k*block_total_6 + operational_costs
print("{:.3f} \t\t& {:.2f} \t& {:.2f} \t\t& {:.2f}\% \t\t& {:d} \t\t& {:.2f} \\\\".format(
omega,
whale_costs,
c_p2w_exp_12,
(c_p2w_exp_12/whale_costs)*100,
expected,
c_p2w_exp_6) )
N = 0
prob = 0
print("\end{tabular}")
print("\\caption{{Comparison of attack costs for $ k_V = {:d} $, all costs given in BTC. The costs for the whale attack are the average from $ 10^6 $ simulation results provided in~\cite{{liao2017incentivizing}}. For comparision different Bitcoin block reward epochs (12.5 and 6.25 BTC) are provided for our P2W attack, all with $ c_{{operational}} = {:.1f} $ BTC, and average fee per block of {:.0f} BTC and a bribe $ \epsilon = {:.0f} $ BTC.}}".format(k,operational_costs,block_fees_average,bribe_per_block))
print("\end{table*}")
```
## Probability of Out-of-Band attack desynchronization
What is the probability that the two chains (funding and attack chain) desynchronize during an attack i.e.,
that two (or more) Bitcoin blocks are mined in close succession without an Ethereum block in between.
The time between Bitcoin as well as the time between Ethereum blocks is exponentially distributed.
Assuming constant difficulty and overall hashrate, Ethereum has a mean block interval i.e., an expected value $E(x)$ of $ 15 $ seconds, whereas Bitcoin has a mean block interval of $ 10\cdot 60 $ seconds.
$$
\begin{align}
E_{ETH}(x) &= 15 \\
E_{BTC}(x) &= 600
\end{align}
$$
$$
\begin{align}
P(X < x) &=
\begin{cases}
1 - \exp(-\lambda x) & x \geq 0, \\
0 & x < 0
\end{cases} \\
P(X \geq x) &= exp(-\lambda x)
\end{align}
$$
#### What is the probability that the time between two Bitcoin blocks is smaller than the mean Ethereum block interval?
What is the probability that the time between two Bitcoin blocks is less than the Ethereum mean block interval of 15 seconds?
<a id='p2in1'></a>
$$
\begin{align}
E_{BTC}(x) &= 600 \\
E_{ETH}(x) &= 15 \\
x &= 15 \\
\lambda &= \frac{1}{E_{BTC}(x)} \\
P(X < 15) &= 1 - e^{-\lambda \cdot x} \\
\end{align}
$$
```
import math
# exponential CDF at x = 15 s with rate 1/600 (mean BTC block interval)
vlambda = 1/(10*60)
x = 15
P_leq_eth = 1 - math.e**( -vlambda * x)
print("The probability that the time between two Bitcoin blocks is less than 15 seconds is\n{} %".format(P_leq_eth * 100))
```
What is the probability that this happens within $ N $ Bitcoin blocks i.e.,
what is the probability that the time between two Bitcoin blocks is smaller than $ 15 $ seconds during $ N $ total Bitcoin blocks?
$$
\begin{align}
P(N) &= 1 - ( 1 - P(X < 15) ) ^ {N-1}
\end{align}
$$
```
# Complement of "no such short gap in any of the N-1 inter-block intervals"
N = 32
P_within_N_btc = 1 - ( 1 - P_leq_eth )**(N-1)
P_within_N_btc * 100
```
What is the probability that two new Bitcoin blocks arrive within less than 15 seconds each?
```
P_leq_eth * P_leq_eth * 100
```
#### What is the probability that $2,3,...$ Bitcoin blocks are found within the Ethereum mean block interval of 15 seconds?
We use a Poisson point process, where the Poisson distribution parameter $ \Lambda = E(N=n) = \frac{t}{E_{BTC}(x)} $ refers to the expected value of the number of events happening within $ t $ time.
$$
\begin{align}
P(N=n) &= \frac{\Lambda^n}{n!} \cdot e^{-\Lambda} \\
\lambda &= \frac{1}{E_{BTC}(x)} = 1/600 \\
\Lambda &= E(N=n) = t/600 \\
t &= 15 \\
n &\in \{1,2,3,...\}
\end{align}
$$
Note that $ n=1 $ already stands for two sequential Bitcoin blocks since we assume Bitcoin and Ethereum start at the same point in time ($ n = 0 $) with a new block each. So if the first Bitcoin block is found before the mean Ethereum block interval we have it that two Bitcoin blocks have been found without a Ethereum block in between.
What is the probability that **exactly** $ n $ Bitcoin blocks are found within the mean Ethereum block interval of $15$ seconds?
```
def P_n_within_t(n, t, E_x):
    """Poisson pmf: probability of exactly n events within time t when the
    mean inter-event time is E_x (so the Poisson parameter is t/E_x).

    Arithmetic form kept identical to the original (math.e** rather than
    math.exp) because later cells assert bitwise equality against scipy.
    """
    events_expected = t/E_x
    return ( ( events_expected**n )/( math.factorial(n) ) ) * math.e**( - events_expected )
# Evaluate the pmf and cross-check against scipy's Poisson pmf
n = 1
E_x = 600
t = 15
P_n_within_t(n,t,E_x) * 100
# Double check with scipy
n = 1
E_x = 600
mu = t/E_x
assert scipy.stats.poisson.pmf(n,mu)*100 == P_n_within_t(n,t,E_x)*100
scipy.stats.poisson.pmf(n,mu) * 100
n = 2
t = 15
E_x = 600
P_n_within_t(n,t,E_x) * 100
```
What is the probability that **at least** $ n $ Bitcoin blocks are found within the mean Ethereum block interval of $15$ seconds?
Lets approximate the result first by iterating and adding the next values of $ n $ to get
the value for at least $ n $ blocks within time $ t $.
For $ n = 1 $ this should be approximately the same value as calculated previously for the probability that the time between two Bitcoin blocks is smaller than the Ethereum block interval of $ 15 $ seconds i.e., $ 2.4690087971667385 \% $
[see here](#p2in1)
```
P_n_within_t(n,t,E_x) * 100 + P_n_within_t(2,15,600) * 100 # ...
# approximate the result by iterating and adding the next values of n
def P_at_least_n_within_t_approx(n, t, E_x, iterations=10):
    """Approximate P(at least n events within t) by summing the pmf terms
    for n, n+1, ..., n+iterations-1 (truncated tail sum)."""
    return sum(P_n_within_t(term, t, E_x) for term in range(n, n + iterations))
# Truncated tail sums for increasing n
n = 1
t = 15
E_x = 600
P_at_least_n_within_t_approx(n,t,E_x) * 100
P_at_least_n_within_t_approx(2,15,600) * 100
P_n_within_t(2,15,600) * 100 # for comparison
P_at_least_n_within_t_approx(3,15,600) * 100
P_at_least_n_within_t_approx(4,15,600) * 100
P_at_least_n_within_t_approx(5,15,600) * 100
```
To accurately capture this case we use the complementary probability of the CDF of the Poisson Distribution.
We calculate the probability to find at least $ n $ blocks within time $ t $ and expected value $ E[x] $ as mean time between Bitcoin blocks.
Therefore $ \lambda = t/E[x] $ is the average number of events per interval.
We calculate the complementary probability for finding at most $ n-1 $ blocks as follows.
$$
\begin{align}
P(X > n) &= 1 - P(X \leq n-1) \\
P(X \leq n) &= F(x) = e^{-\lambda } \sum_{i=0}^{n-1} \frac{\lambda ^i}{i!}
\end{align}
$$
```
def P_at_least_n_within_t(n, t, E_x):
    """Exact P(at least n events within t): complement of the Poisson CDF
    at n-1, with parameter t/E_x.

    The summation order and e** form match the original exactly — later
    cells assert bitwise equality against scipy results.
    """
    Lam = t/E_x
    # sum_{i=0}^{n-1} Lam^i / i!  (CDF terms, accumulated left to right)
    cdf_sum = sum(( Lam**i ) / ( math.factorial(i) ) for i in range(0, n))
    return 1 - ( math.e**(-Lam) * cdf_sum )
# Evaluate and cross-check the exact tail probability
n = 1
t = 15
E_x = 600
P_at_least_n_within_t(n,t,E_x) * 100
# Double check with scipy and complementary probability of cdf(0).
# i.e., the complement of the probability that 0 (or less) blocks are found during 15 seconds?
n = 0
E_x = 600
t = 15
mu = t/E_x
assert P_at_least_n_within_t(1,t,E_x) == (1 - scipy.stats.poisson(mu).cdf(0))
(1 - scipy.stats.poisson(mu).cdf(n)) * 100
# Double check with probability that the time between two Bitcoin blocks is
# less than 15 seconds.
n = 1
t = x = 15
E_x = 600
vlambda = 1/E_x
assert P_at_least_n_within_t(n,t,E_x) * 100 == (1 - math.e**( -vlambda * x) )*100
P_at_least_n_within_t(n,t,E_x) * 100
```
Double check this case by using the continuous Erlang distribution.
We calculate the probability to find at least $ n $ blocks within time $ t $ and expected value $ E[x] $ as mean time between Bitcoin blocks as follows.
The probability that $ n $ (sometimes denoted $ k $ and referred to as "shape") events happen in time $ x $ when $ \lambda = 1/E[x] $ is the rate at which events happen. $ \mu = 1/\lambda $ is the reciprocal of the rate and sometimes referred to as "scale".
$$
\begin{align}
F(x) &= \begin{cases}
1 - e^{-\lambda x} \sum_{i=0}^{n-1} \frac{(\lambda x)^i}{i!} & x \geq 0, \\
0 & x < 0
\end{cases} \\
\end{align}
$$
```
def P_at_least_n_within_t_erlang(n, t, E_x):
    """Same tail probability via the Erlang CDF: P(n-th event within t).

    Note: rate is computed as 1/E_x and multiplied by t (not t/E_x) —
    a later cell asserts bitwise equality against P_at_least_n_within_t,
    so the exact arithmetic of the original is preserved.
    """
    rate = 1/E_x
    elapsed = t
    # sum_{i=0}^{n-1} (rate*elapsed)^i / i!  accumulated left to right
    cdf_terms = sum(( ( rate * elapsed )**i ) / ( math.factorial(i) ) for i in range(0, n))
    return 1 - ( math.e**(-rate*elapsed) * cdf_terms )
# Cross-check Erlang CDF vs Poisson complement vs scipy implementations
n = 1
t = 15
E_x = 600
assert P_at_least_n_within_t(n,t,E_x) * 100 == P_at_least_n_within_t_erlang(n,t,E_x) * 100
P_at_least_n_within_t_erlang(n,t,E_x) * 100
# Double check with scipy
n = 1
t = 15
E_x = 600
assert scipy.stats.erlang(a=n, scale=1/(1/E_x)).cdf(t) * 100 == scipy.stats.erlang(a=n, scale=E_x).cdf(t) * 100
scipy.stats.erlang(a=n, scale=E_x).cdf(t) * 100
n = 2
E_x = 600
t = 15
scipy.stats.erlang(a=n, scale=E_x).cdf(t) * 100
# Double check against complement of Poisson cdf
n = 2
E_x = 600
t = 15
mu = t/E_x
(1 - scipy.stats.poisson(mu).cdf(n-1)) * 100
n = 2
E_x = 600
t = 15
P_at_least_n_within_t_erlang(n,t,E_x) * 100
n = 2
t = 15
E_x = 600
P_at_least_n_within_t(n,t,E_x) * 100
n = 3
t = 15
E_x = 600
P_at_least_n_within_t(n,t,E_x) * 100
n = 4
t = 15
E_x = 600
P_at_least_n_within_t(n,t,E_x) * 100
```
What is the probability that this happens within $ N $ Bitcoin blocks i.e.,
what is the probability that the time between three Bitcoin blocks is smaller than $ 15 $ seconds during $ N $ total Bitcoin blocks.
$$
\begin{align}
P(N) &= 1 - ( 1 - P(n) ) ^ {\lceil N/n \rceil}
\end{align}
$$
Recall both chains start at the same moment in time therefore $ n = 1 $ already stands for two sequential Bitcoin blocks
```
# Probability of the n-blocks-within-15s event happening somewhere within
# N total Bitcoin blocks (independent-trial approximation).
# NOTE(review): the variable name 'P_at_leat_n_within_N' (sic) differs from
# 'P_at_least_n_within_N' used two cells below — both are write-only here.
N = 32
n = 1
P_at_leat_n_within_N = 1 - ( 1 - P_at_least_n_within_t(n,15,600) )**(math.ceil((N-1)/n))
P_at_leat_n_within_N * 100
# for comparison the less accurate value
N = 32
n = 1
P_n_within_N = 1 - ( 1 - P_n_within_t(n,15,600) )**(math.ceil((N-1)/n))
P_n_within_N * 100
# 3 Bitcoin blocks within 15 seconds over N total Bitcoin blocks
N = 32
n = 2
P_at_leat_n_within_N = 1 - ( 1 - P_at_least_n_within_t(n,15,600) )**(math.ceil((N-1)/n))
P_at_leat_n_within_N * 100
# 3 Bitcoin blocks within 15 seconds over N total Bitcoin blocks
N = 32
n = 2
P_at_least_n_within_N = 1 - ( 1 - P_at_least_n_within_t_erlang(n,15,600) )**(math.ceil((N-1)/n))
P_at_least_n_within_N * 100
N = 32
n = 3
P_at_leat_n_within_N = 1 - ( 1 - P_at_least_n_within_t(n,15,600) )**(math.ceil((N-1)/n))
P_at_leat_n_within_N * 100
N = 32
n = 4
P_at_leat_n_within_N = 1 - ( 1 - P_at_least_n_within_t(n,15,600) )**(math.ceil((N-1)/n))
P_at_leat_n_within_N * 100
```
#### Probability that the time between two Ethereum blocks is larger than the mean Bitcoin block interval
What is the probability that the time between two Ethereum blocks is larger than the Bitcoin mean block interval of 600 seconds?
$$
\begin{align}
E_{BTC}(x) &= 600 \\
E_{ETH}(x) &= 15 \\
x &= 600 \\
\lambda &= \frac{1}{E_{ETH}(x)} \\
P(X \geq x) &= e^{-\lambda \cdot x} \\
\end{align}
$$
```
import math
# exponential survival function at x = 600 s with rate 1/15 (ETH mean interval)
# NOTE: this overwrites the globals vlambda and x set earlier for the BTC case
vlambda = 1/(15)
x = 600
P_gt_btc = math.e**( - vlambda * x )
P_gt_btc * 100
```
#### Probability that during a time period $ t $, a Bitcoin block is mined before an Ethereum block
Assuming both chains are synchronized and miners start their search for the next block at the same time on the respective chains.
What is the probability that during the whole time period the next Bitcoin block is mined before the next Ethereum block?
$$
\begin{align}
P_{ETH}(X \geq x) &= e^{-\frac{1}{15} \cdot x} \\
P_{BTC}(X < x) &= 1 - e^{-\frac{1}{600} \cdot (x+1)} \\
P(t) &= \sum_{i=0}^{t} P_{ETH}(X \geq x) \cdot P_{BTC}(X < x)
\end{align}
$$
```
# P_e(X >= x_1) * P_b(X < (x_1 + 1) ) + P_e(X >= x_2) * P_b(X < (x_2 + 1) + ...
def P_e(x, vbeta):
    """Survival function P(X >= x) of an exponential with mean vbeta."""
    return math.e ** ( - (1 / vbeta) * x )
def P_b(x, vbeta):
    """CDF-style probability P(X < x+1) of an exponential with mean vbeta."""
    return 1 - math.e ** ( - (1 / vbeta) * (x + 1) )
# Sum over discrete seconds i: ETH gap at least i AND a BTC block by i+1
t = 200
P = 0
for i in range(1,t):
#print("{:3}: {:8.7f}".format(i,P))
P += P_e(i,15) * P_b(i,600)
print("\nThe probability that a Bitcoin block arrives before an Ethereum block in an interval of {} seconds is\n {} %".format(t,P * 100))
```
### Simulation of desynchronization
Simulate the two chains and check if there ever occurs a case where a sequence of Bitcoin blocks is mined without any Ethereum block in between.
```
import numpy
# Draw exponential inter-block times for one BTC trace of N_btc blocks
vbeta_btc = 10*60 # mean time between Bitcoin blocks (whole network)
N_btc = 32 # Total number of Bitcoin block events for the attack
vlambda_btc = 1/vbeta_btc
rand_btc = numpy.random.exponential(vbeta_btc,N_btc)
print(rand_btc)
#print(np.cumsum(rand)) # sort values ascending
print(" lambda = ",vlambda_btc)
print(" E(x) (beta) = ",vbeta_btc)
print(" Var(x) = ",vbeta_btc**2)
import numpy
# Draw enough ETH inter-block times to span the BTC trace plus 10% margin
vbeta_eth = 15 # mean time between Ethereum blocks
N_eth = int( (sum(rand_btc).item() / vbeta_eth) + (sum(rand_btc).item() / vbeta_eth)*0.1 ) # Total number of Ethereum block events for the attack plus some extra margin
vlambda_eth = 1/vbeta_eth
rand_eth = numpy.random.exponential(vbeta_eth,N_eth)
print(rand_eth)
#print(np.cumsum(rand)) # sort values ascending
print(" len(rand_eth)= ",len(rand_eth))
print(" lambda = ",vlambda_eth)
print(" E(x) (beta) = ",vbeta_eth)
print(" Var(x) = ",vbeta_eth**2)
def find_block_in_the_middle(begin, end, input_list):
    """Return True iff some cumulative sum of input_list lies strictly
    between begin and end; stop scanning once the sum passes end."""
    running = 0
    for interval in input_list:
        running += interval
        if begin < running < end:
            return True
        if running > end:
            return False
    return False
def compare_chains(rand_btc_list,rand_eth_list,verbose=False,sequence=2):
"""
Check whether a run of `sequence` Bitcoin blocks exists that is not
interrupted by an Ethereum block somewhere in between.
Inputs are lists of inter-block times; returns "fail" if such a
desynchronizing run exists, otherwise "success". With verbose=True the
offending window and the surrounding ETH/BTC arrival times are printed.
"""
btc_time = 0
btc_blkcnt = 0
eth_time = 0
eth_blkcnt = 0
fail_cnt = 0
blkgap = 0
sequence = sequence - 1 # reduce sequence by one since we count additional blocks starting from current
begin_btc = 0
# slide a window of `sequence` additional BTC blocks over the trace
for i in range(0,len(rand_btc_list)):
begin_btc += rand_btc_list[i]
end_btc = begin_btc
if i+sequence < len(rand_btc_list):
for j in range(1,sequence+1):
pos = i+j
end_btc += rand_btc_list[pos]
# desynchronized iff no ETH arrival falls inside (begin_btc, end_btc)
if begin_btc != end_btc and not find_block_in_the_middle(begin_btc,end_btc,rand_eth_list):
if verbose:
esum = 0
for num,entry in enumerate(rand_eth_list):
esum += entry
if esum > begin_btc*0.8:
print("ETH: {:4d} : {:8.4f} += {:10.4f}".format(num,entry,esum))
if esum > end_btc:
break
print("inter: {} - {} begin: [{}] = {} end: [{}] = {}".format(begin_btc,
end_btc,
i,
rand_btc_list[i],
pos,
rand_btc_list[pos]))
esum = 0
for num,entry in enumerate(rand_btc_list):
esum += entry
if num >= i:
print("\tBTC: {:4d} : {:8.4f} += {:10.4f}".format(num,entry,esum))
if num > pos:
break
return "fail"
return "success"
# Run the check once on the sampled traces (sequence=1: any pair of BTC
# blocks without an ETH block in between counts as desynchronization)
rand_btc_list = rand_btc.tolist()
rand_eth_list = rand_eth.tolist()
compare_chains(rand_btc_list,rand_eth_list,True,sequence=1)
def simulate_chains(vbeta_btc, vbeta_eth, N_btc, verbose=False, sequence=2):
    """Draw one random BTC/ETH block-arrival trace pair and test it for
    desynchronization via compare_chains; returns "fail" or "success"."""
    btc_intervals = numpy.random.exponential(vbeta_btc, N_btc)
    # ETH block budget covering the whole BTC trace plus a 33% safety margin
    span = sum(btc_intervals).item()
    eth_count = int((span / vbeta_eth) + (span / vbeta_eth) * 0.33)
    eth_intervals = numpy.random.exponential(vbeta_eth, eth_count)
    return compare_chains(btc_intervals.tolist(), eth_intervals.tolist(), verbose, sequence)
simulate_chains(600,15,32,False)
from collections import Counter
def simulation_run(iterations, N_btc, sequence):
    """Run simulate_chains `iterations` times and print the outcome split.

    Tallies the "success"/"fail" results and prints each outcome's share
    as a percentage of all iterations.

    Args:
        iterations: number of independent simulations to run.
        N_btc: BTC block count forwarded to simulate_chains.
        sequence: required uninterrupted BTC run length, forwarded as well.
    """
    cnt = Counter()
    for _ in range(iterations):
        cnt[simulate_chains(600, 15, N_btc, False, sequence)] += 1
    # Query the counter once instead of re-sorting it for every access.
    common = cnt.most_common(2)
    k1, n1 = common[0]
    v1 = (n1 / iterations) * 100
    if len(common) > 1:
        k2, n2 = common[1]
        v2 = (n2 / iterations) * 100
    else:
        # Only one outcome ever occurred; report the other side as 0%.
        k2 = "fail"
        v2 = 0
    print("{:10}: {}\n{:10}: {}".format(k1, v1, k2, v2))
# Quick sanity run: 100 iterations, look for 2 consecutive BTC blocks.
iterations = 100
N_btc = 32
sequence = 2
simulation_run(iterations,N_btc,sequence)
# Larger run of the same configuration for a more stable estimate.
iterations = 10000
N_btc = 32
sequence = 2
simulation_run(iterations,N_btc,sequence)
# compare with calculated approximation
# (vlambda and x are defined in earlier cells of the notebook)
N = 32
P_within_N_btc = 1 - ( 1 - (1 - math.e**( -vlambda * x)) )**(N-1)
P_within_N_btc * 100
# compare with other calculated approximation
# (P_at_least_n_within_t is defined in an earlier cell)
N = 32
n = 1
P_at_least_n_within_N = 1 - ( 1 - P_at_least_n_within_t(n,15,600) )**(math.ceil((N-1)/n))
P_at_least_n_within_N * 100
# simulate: 3 consecutive BTC blocks without an interleaved ETH block
iterations = 10000
N_btc = 32
sequence = 3
simulation_run(iterations,N_btc,sequence)
# compare with calculated approximation
N = 32
n = 2
P_at_least_n_within_N = 1 - ( 1 - P_at_least_n_within_t(n,15,600) )**(math.ceil((N-1)/n))
P_at_least_n_within_N * 100
# simulate: 4 consecutive BTC blocks
iterations = 10000
N_btc = 32
sequence = 4
simulation_run(iterations,N_btc,sequence)
# compare with calculated approximation
N = 32
n = 3
P_at_least_n_within_N = 1 - ( 1 - P_at_least_n_within_t(n,15,600) )**(math.ceil((N-1)/n))
P_at_least_n_within_N * 100
# simulate: 5 and 6 consecutive BTC blocks (no closed-form comparison here)
iterations = 10000
N_btc = 32
sequence = 5
simulation_run(iterations,N_btc,sequence)
iterations = 10000
N_btc = 32
sequence = 6
simulation_run(iterations,N_btc,sequence)
```
## Calculate some concrete values for P2W attacks
### precision limits
```
# approx smallest target probability (99,510916 %) which can be calculated for smallest omega from the
# whale attack paper (53,2%)
attack_N(k=6,omega=0.532,success_prob=0.99510916)
# for k=1 max target is 99,89713 %
attack_N(k=1,omega=0.532,success_prob=0.9989713)
# approx smallest omega which can be calculated with floats and target probability 99%
attack_N(k=1,omega=0.517,success_prob=0.99)
math.ceil(attack_expected(k=1,N=492,omega=0.52)) # expected number of blocks for this value
attack_prob(k=1,N=21,omega=0.517)*100 # Probability of success after that many extra rewarded blocks
21 * 10 / 60 # duration of the attack in hours (Bitcoin)
21 * 15 / 60 # duration of the attack in min. (Ethereum)
# Weak attacker (33% hash rate): catch-up probability and derived limits.
catch_up(0.33,1)
attack_N(k=1,omega=0.33,success_prob=0.24)
attack_expected(k=1,N=18,omega=0.24)
def attack_limits(k, omega):
    """Summarise the practical limits of an attack for strength omega.

    Returns the tuple (max_P, max_N, P, exp_N, exp_P): the largest
    reachable target probability, the block count needed for it, the
    probability actually achieved at that count, the expected block
    count, and the probability at the expected count.
    """
    # A majority attacker can always catch up, so cap the target at 99%;
    # otherwise stay just below the theoretical catch-up probability
    # (rounded down to xx.x percent).
    max_P = 0.99 if omega > 0.5 else round(catch_up(omega, k) - 0.005, 3)
    max_N = attack_N(k, omega, success_prob=max_P)
    exp_N = math.ceil(attack_expected(k, max_N, omega))
    return (max_P,
            max_N,
            attack_prob(k, max_N, omega),
            exp_N,
            attack_prob(k, exp_N, omega))
# Sweep attacker strength omega (and confirmation depth k) to tabulate
# (max_P, max_N, P, exp_N, exp_P) for each configuration.
attack_limits(k=1,omega=0.75)
attack_limits(k=1,omega=0.70)
attack_limits(k=1,omega=0.66)
attack_limits(k=1,omega=0.60)
attack_limits(k=1,omega=0.55)
attack_limits(k=1,omega=0.517)
attack_limits(k=1,omega=0.479)
attack_limits(k=1,omega=0.33)
attack_limits(k=2,omega=0.33)
attack_limits(k=1,omega=0.15)
attack_limits(k=1,omega=0.10)
```
| github_jupyter |
```
import re
from tqdm import tqdm
from collections import defaultdict, Counter, UserDict
from itertools import product
from cached_property import cached_property
from litecoder.models import session, City
from litecoder import logger
def keyify(text):
    """Normalize a location string into a lookup key.

    Lowercases and strips the text, drops periods, turns commas and
    hyphens into spaces, and collapses runs of whitespace into one
    space, e.g. 'Tuscaloosa, AL' -> 'tuscaloosa al'.
    """
    text = text.lower()
    text = text.strip()
    text = text.replace('.', '')
    # Raw strings for regex patterns (the original '\s{2,}' is an
    # invalid escape sequence and raises a DeprecationWarning).
    text = re.sub(r'[,-]', ' ', text)
    # 2+ whitespace -> 1 space
    text = re.sub(r'\s{2,}', ' ', text)
    return text
# Spot-check the normalizer on a few representative inputs.
keyify('la-la land')
keyify('Tuscaloosa, AL')
keyify('Washington,DC')
class NameCounts(Counter):
    """Counter of normalized city names; lookups are normalized too."""

    def __init__(self):
        logger.info('Indexing name -> counts.')
        # Normalize every city name before counting it.
        super().__init__(keyify(row[0]) for row in session.query(City.name))

    def __getitem__(self, text):
        # Normalize the query the same way the keys were built.
        return super().__getitem__(keyify(text))
class NamePopulations(defaultdict):
    """Map a normalized city name to the populations of all cities bearing it."""

    def __init__(self):
        """Index name -> [pops], substituting the median population
        for cities with no population on record.
        """
        super().__init__(list)
        logger.info('Indexing name -> populations.')
        fallback = City.median_population()
        for city in tqdm(City.query):
            pop = city.population or fallback
            for alias in city.names:
                self[keyify(alias)].append(pop)

    def __getitem__(self, text):
        # Lookups are normalized the same way the keys were built.
        return super().__getitem__(keyify(text))
class AllowBareName:
    """Decide whether a city may be indexed under its bare, unqualified name."""

    def __init__(self, min_p2_ratio=10):
        # min_p2_ratio: how many times larger the city must be than the
        # second-biggest city sharing the name for the bare name to be
        # unambiguous enough.
        self.name_pops = NamePopulations()
        self.min_p2_ratio = min_p2_ratio

    def __call__(self, city, name):
        populations = sorted(self.name_pops[name], reverse=True)
        # A name carried by fewer than two cities is always unambiguous.
        if len(populations) < 2:
            return True
        runner_up = populations[1]
        return (city.population or 0) / runner_up > self.min_p2_ratio
# Country aliases appended to index keys so forms like
# "portland or usa" and "portland or america" resolve too.
USA_NAMES = (
    'USA',
    'United States',
    'United States of America',
    'US',
    'America',
)
class USCityKeyIter:
def __init__(self, *args, **kwargs):
self.allow_bare = AllowBareName(*args, **kwargs)
def _iter_keys(self, city):
"""Enumerate index keys for a city.
Args:
city (db.City)
Yields: str
"""
bare_names = [n for n in city.names if self.allow_bare(city, n)]
states = (city.name_a1, city.us_state_abbr)
for name in bare_names:
yield name
for name, usa in product(bare_names, USA_NAMES):
yield ' '.join((name, usa))
for name, state in product(city.names, states):
yield ' '.join((name, state))
for name, state, usa in product(city.names, states, USA_NAMES):
yield ' '.join((name, state, usa))
def __call__(self, city):
for text in self._iter_keys(city):
yield keyify(text)
# Build one key iterator and spot-check it on two cities.
city_key_iter = USCityKeyIter()
la = City.query.filter(City.country_iso=='US').filter(City.name=='Los Angeles').first()
tt = City.query.filter(City.country_iso=='US').filter(City.name=='Tuscaloosa').first()
list(city_key_iter(la))
list(city_key_iter(tt))
class USCityIndex:
    """Inverted index mapping normalized key strings to sets of city WOF ids."""

    def __init__(self):
        self._idx = defaultdict(set)

    def __getitem__(self, text):
        # Lookups are normalized the same way the keys were built.
        return self._idx[keyify(text)]

    def query(self, text):
        """Return the City rows whose ids are indexed under `text`."""
        return City.query.filter(City.wof_id.in_(self[text])).all()

    def build(self):
        """Index all US cities.
        """
        iter_keys = USCityKeyIter()
        cities = City.query.filter(City.country_iso=='US')
        logger.info('Indexing US cities.')
        for city in tqdm(cities):
            try:
                # Generate the complete key set up front, so a key-generation
                # error leaves the city entirely unindexed, never half-done.
                keys = list(iter_keys(city))
                # Index the keys computed above (the original re-ran the
                # generator a second time here, doing the work twice).
                for key in keys:
                    self[key].add(city.wof_id)
            except Exception as e:
                # Best-effort: skip cities whose key generation fails.
                # NOTE(review): consider logging `e` so failures are visible.
                pass
# Build the full index and poke at it interactively.
ci = USCityIndex()
ci.build()
len(ci._idx)
%time ci.query('pdx')
ny = ci.query('new york')[0]
ny.__dict__
list(City.__table__.columns)
```
| github_jupyter |
```
%pylab inline
from numpy.lib.recfunctions import append_fields
### Excluding the AGB stars with dust spheres around.
x = np.load("../data/GDR3/gaiadr3_0ext.npy")
# Selection mask for the dusty AGB regime: luminous, cool, still bright in G.
cut = (x['log_lum'] <5.) & (x['log_lum'] >3.) & (x['log_teff'] <3.7) & (x['gaia_g'] > 0.5)
print(len(x), len(x[cut]))
x = np.load("../data/TMASS/2mass_0ext.npy")
print(x.dtype.names)
# Extinction values (A_V) for which pre-computed 2MASS tables exist.
avs = np.array([0,1,2,3,5,10,20])
x = []
for av in avs:
    temp = np.load("../data/TMASS/2mass_%dext.npy" %(av))
    # we exclude the AGB dust stars from the averaging
    temp = temp[~cut]
    # Add an (initially zero) integer column that will hold the grid key.
    temp = append_fields(temp,"parsec_index",np.zeros(shape = len(temp),dtype = np.int32),usemask=False)
    x.append(temp)
print(len(x[0]),x[0].dtype)
for name in x[0].dtype.names:
    print(name,x[0][name],len(np.unique(x[0][name])))
def return_index_feh(feh):
    """Map a metallicity [Fe/H] (dex) onto an integer grid index
    (bin width 0.05 dex, grid origin at -1.5 dex)."""
    bin_width = 0.05
    grid_origin = 1.5
    return int((feh + grid_origin) / bin_width)
# Grid extent in metallicity: index range spanned by the isochrone table.
min_value = min(x[0]['meh_ini'])
max_value = max(x[0]['meh_ini'])
min_index_feh = return_index_feh(min_value)
max_index_feh = return_index_feh(max_value)
stretch = max_index_feh - min_index_feh
# Bug fix: the labels were copy-pasted from the teff cell but this
# cell reports [Fe/H] (meh_ini) values, so they now say 'feh'.
print('feh values in parsec')
print('feh min max value: ', min_value, max_value )
print('feh min max index: ', min_index_feh, max_index_feh)
print('dimension cut into %d pieces' %(stretch))
def return_index_teff(teff):
    """Map log(Teff) onto an integer grid index (bin width 0.02 dex)."""
    bin_width = 0.02
    return int(teff / bin_width)
# Grid extent in effective temperature (log Teff).
min_value = min(x[0]['log_teff'])
max_value = max(x[0]['log_teff'])
min_index_teff = return_index_teff(min_value)
max_index_teff = return_index_teff(max_value)
stretch = max_index_teff - min_index_teff
print('teff values in parsec')
print('teff min max value: ', min_value, max_value )
print('teff min max index: ', min_index_teff, max_index_teff)
print('dimension cut into %d pieces' %(stretch))
def return_index_lum(lum):
    """Map log(L) onto an integer grid index (bin width 0.05, origin -5)."""
    bin_width = 0.05
    grid_origin = 5.
    return int((lum + grid_origin) / bin_width)
# Grid extent in luminosity (log L).
min_value = min(x[0]['log_lum'])
max_value = max(x[0]['log_lum'])
min_index_lum = return_index_lum(min_value)
max_index_lum = return_index_lum(max_value)
stretch = max_index_lum - min_index_lum
print('lum values in parsec')
print('lum min max value: ', min_value, max_value )
print('lum min max index: ', min_index_lum, max_index_lum )
print('dimension cut into %d pieces' %(stretch))
def return_index(feh, teff, lum):
    """Combine the three grid coordinates into a single integer key.

    feh given in dex, teff given in log teff, lum given in log lum.
    Each coordinate is clamped to the range covered by the isochrone
    grid (module-level min/max indices), then the indices are packed
    into one integer: key = feh + 1000 * teff + 1000000 * lum.
    """
    # Clamp each raw index into the grid's valid range.
    index_feh = min(max(return_index_feh(feh), min_index_feh), max_index_feh)
    index_teff = min(max(return_index_teff(teff), min_index_teff), max_index_teff)
    index_lum = min(max(return_index_lum(lum), min_index_lum), max_index_lum)
    assert(index_feh >= 0)
    assert(index_teff >= 0)
    assert(index_lum >= 0)
    # Pack into decimal "digit groups" 1000 wide each.
    return index_feh + index_teff * 1000 + index_lum * 1000 * 1000
# Assign every isochrone point its combined (feh, teff, lum) grid key.
print('indexing isochrones')
for i in range(len(x[0])):
    x[0]["parsec_index"][i] = return_index(x[0]["meh_ini"][i],x[0]["log_teff"][i],x[0]["log_lum"][i])
# All need to be sorted by parsec_index
# (the same permutation is applied to every extinction table so the
# rows of all A_V tables stay aligned with the zero-extinction one)
sort_ind = np.argsort(x[0]['parsec_index'], kind = 'mergesort')
for i in range(len(avs)):
    x[i] = x[i][sort_ind]
# Collect the 2MASS magnitudes per extinction value into (row, av) matrices.
j_ext = np.zeros(shape=(len(x[0]),len(x)))
h_ext = np.zeros(shape=(len(x[0]),len(x)))
ks_ext = np.zeros(shape=(len(x[0]),len(x)))
for i,item in enumerate(x):
    print(i,item[0])
    j_ext[:,i] = item["tmass_j"]
    h_ext[:,i] = item["tmass_h"]
    ks_ext[:,i] = item["tmass_ks"]
# show the extinction law and on which parameters it is dependent
# (subtract the A_V=0 column so the tables hold the magnitude excess)
j_ext = j_ext - j_ext[:,0,None]
h_ext = h_ext - h_ext[:,0,None]
ks_ext = ks_ext - ks_ext[:,0,None]
# Per grid cell: median/std extinction in 3 bands x 6 nonzero A_V values.
ext = np.zeros(shape = (len(np.unique(x[0]["parsec_index"])),3,6),dtype = np.float32)
ext_std = np.zeros_like(ext)
# Median/std of every stellar parameter per grid cell.
parsec = np.zeros(shape = (len(np.unique(x[0]["parsec_index"]))),dtype = x[0].dtype)
parsec_std = np.zeros(shape = (len(np.unique(x[0]["parsec_index"]))),dtype = x[0].dtype)
parsec["parsec_index"] = np.unique(x[0]["parsec_index"])
parsec_std["parsec_index"] = np.unique(x[0]["parsec_index"])
# Boundaries of each equal-key run in the (sorted) table.
indexing = np.hstack((0,np.searchsorted(x[0]['parsec_index'],parsec['parsec_index'], side='right')))
print(indexing)
for i in range(len(indexing)-1):
    if i%10000==0:
        print(i, len(indexing))
    lower = indexing[i]
    upper = indexing[i+1]
    #parsec['parsec_index'][i]
    #x[0]['parsec_index'][lower:upper]
    temp = x[0][lower:upper]
    j_temp = j_ext[lower:upper]
    h_temp = h_ext[lower:upper]
    ks_temp = ks_ext[lower:upper]
    for jtem in parsec.dtype.names:
        if jtem == "parsec_index":
            # Sanity check: this run really belongs to cell i's key.
            assert(parsec["parsec_index"][i] == temp["parsec_index"][0])
        else:
            parsec[jtem][i] = np.median(temp[jtem])
            parsec_std[jtem][i] = np.std(temp[jtem])
    # Column t+1 skips the A_V=0 column (its excess is zero by construction).
    for t in np.arange(6):
        ext[i,0,t] = np.median(j_temp[:,t+1])
        ext[i,1,t] = np.median(h_temp[:,t+1])
        ext[i,2,t] = np.median(ks_temp[:,t+1])
        ext_std[i,0,t] = np.std(j_temp[:,t+1])
        ext_std[i,1,t] = np.std(h_temp[:,t+1])
        ext_std[i,2,t] = np.std(ks_temp[:,t+1])
np.save('ext_2mass.npy',ext)
np.save('ext_std_2mass.npy', ext_std)
np.save('parsec_2mass.npy', parsec)
np.save('parsec_std_2mass.npy', parsec_std)
# check that the stds are small and also that Galaxia falls onto those parsec indexes.
```
| github_jupyter |
# Circuit optimization using PatternManager - example of QAOA for MaxCut
This notebook provides an example of minimizing the duration of a quantum circuit. In this notebook, a quantum circuit implementing an instance of Q.A.O.A. is used and the `PatternManager` tool will be used to minimize the duration of this circuit. Since the purpose of this notebook is to explain the optimization tool `PatternManager`, details on the implementation of the circuit are not explained.
In this notebook, a variational circuit is used to solve MaxCut for the graph printed below. Solving MaxCut for a graph $\mathcal{G} = (\mathcal{V}, \mathcal{E})$ consists in finding a subset $S$ of $\mathcal{V}$ such as the number of edges in $\mathcal{E}$ linking a vertex of $S$ to a vertex of $\mathcal{V} \backslash S$ is maximal.
<img src="images/graph.png" width="500px" height="auto" alt="Graph of interaction" title="Graph of interaction"/>
The circuit used in this example can be split in 3 parts:
1. An Hadamard gate on each qubit
2. For each pair of qubits $i$ and $j$, there is an E gate if and only if $i$ and $j$ are connected in the graph above
3. An $R_X$ on each qubit
An E gate can be defined by the following pattern:
<img src="images/E_gate.png" width="500px" height="auto" alt="Porte E" title="E gate definition"/>
Our circuit (limited to the first 8 qubits) looks like:
<img src="images/algo.png" width="750px" height="auto" alt="Algorithm" title="Algorithm limited to the first 8 qubits"/>
## Initial circuit
The circuit should be created before starting the optimization. The following code defines an abstract gate `E` corresponding to the definition above. The circuit is then defined using these `E` gates. Since `PatternManager` is used to optimize the depth of the circuit, the initial order of `E` corresponds to the order which maximize the duration of the circuit.
```
from qat.lang.AQASM import Program, H, CNOT, PH, RX, QRoutine
from qat.lang.AQASM.misc import build_gate
from qat.pbo.utils import depth
import numpy as np
# Define an abstract gate E
@build_gate("E", [float], 2)
def E(alpha):
    """
    Build a E gate: a CNOT, a PH(alpha) on the target qubit, then a
    second CNOT (the pattern shown in images/E_gate.png).
    """
    routine = QRoutine()
    routine.apply(CNOT, [0, 1])
    routine.apply(PH(alpha), [1])
    routine.apply(CNOT, [0, 1])
    # Bug fix: the routine must be returned -- without it build_gate
    # receives None and the abstract gate has no implementation.
    return routine
# Define the worst order of E gates
edges = [(10, 15), (9, 15), (9, 14), (4, 9), (0, 4), (0, 5),
         (1, 5), (5, 10), (10, 16), (11, 16), (11, 17),
         (6, 11), (1, 6), (2, 6), (2, 7), (7, 12), (12, 17),
         (12, 18), (13, 18), (8, 13), (3, 8)]
# Define program (one qubit per graph vertex; two variational angles)
prog = Program()
qbits = prog.qalloc(19)
alpha = prog.new_var(float, r"\alpha")
beta = prog.new_var(float, r"\beta")
# Wall of hadamard
for qb in qbits:
    prog.apply(H, qb)
# E gates, one per edge of the MaxCut graph
for vertex_1, vertex_2 in edges:
    prog.apply(E(alpha), qbits[vertex_1], qbits[vertex_2])
# Wall of RX
for qb in qbits:
    prog.apply(RX(beta), qb)
# Get initial circ
initial_circ = prog.to_circ()
%qatdisplay initial_circ
```
## Using metrics
The tool `PatternManager` is used to optimize any *score function* given by the user. A *score function* is a function that the user wants to maximize. The `qat.nnize` module provides tools to define score functions.
The `DurationMetric` class can be used as a *score function*, this class will compute the opposite of the duration of the circuit (this tool computes the opposite of the duration because maximizing the opposite of the duration is equivalent to minimizing the duration: the opposition of the duration is then the metric we want to maximize).
In our example, each gate will have the same duration: 1 unit of time.
```
from qat.nnize.metrics import DurationMetric
# Define the metric
duration_metric = DurationMetric()
# Define the default duration: every gate takes 1 time unit
duration_metric.set_gate_time({"-DEFAULT-": 1})
# The metric has to compute the (negated) duration of the circuit
duration_metric.minimize_overall_time()
# Duration of the initial circuit (negate: the metric returns -duration)
print("Duration of the initial circuit:",
      -duration_metric(initial_circ))
```
## Circuit optimization
The optimization problem consists in maximizing the function `duration_metric`. This function is called **global metric**, the tool `PatternManager` will use this metric to perform the optimization.
Since E gates commute on any qubits, a few rules will be defined. The tool `PatternManager` will use these rules to optimize the duration of the circuit. The rules are defined by:
<img src="images/patterns.png" width="500px" height="auto" alt="Rewriting rules" title="Patterns"/>
There are 3 commutation rules above, so 3 groups will be defined for the optimizer. A group is a set of equivalent patterns (i.e. a small subcircuit), the optimizer can replace any pattern in the circuit by a pattern of the same group. Groups define the action space of the optimizer.
`PatternManager` will use a heuristic to solve this optimization. Two different methods may be used:
- The gradient descent (use `"gradient"`) $\rightarrow$ Used by default
- The simulated annealing (use `"annealing"`) $\rightarrow$ Used here
```
from qat.pbo import PatternManager, VAR
from qat.lang.AQASM import AbstractGate
# Define the optimizer, driven by the duration metric defined above
manager = PatternManager(global_metric=duration_metric)
# Define abstract variables matching any gate angle
theta = VAR()
gamma = VAR()
# Group 1 - first commutation rule: E gates sharing one qubit commute
group1 = manager.new_group()
group1.add_pattern([('E', [1, 2], theta), ('E', [0, 1], gamma)])
group1.add_pattern([('E', [0, 1], gamma), ('E', [1, 2], theta)])
# Group 2 - second commutation rule
group2 = manager.new_group()
group2.add_pattern([('E', [0, 1], theta), ('E', [0, 2], gamma)])
group2.add_pattern([('E', [0, 2], gamma), ('E', [0, 1], theta)])
# Group 3 - third commutation rule
group3 = manager.new_group()
# NOTE(review): x3 is never used; AbstractGate is imported but unused here.
x3 = VAR()
group3.add_pattern([('E', [0, 2], theta), ('E', [1, 2], gamma)])
group3.add_pattern([('E', [1, 2], gamma), ('E', [0, 2], theta)])
```
The optimizer can be then called on the circuit to minimize the duration of the circuit. A trace can be passed to the optimizer to log the values of the metric during the optimization.
Since the E gate is not a common gate, the constructor of the E gate should be given to the optimizer.
```
# Create a trace list to log the metric value at each iteration
trace = list()
# Add E gate constructor (E is not a standard gate)
manager.add_abstract_gate(E)
# Start optimization with simulated annealing
final_circ = manager.replace_pattern(initial_circ, method='annealing', trace=trace)
# Print the duration of the optimized circuit
print("Final duration:", -duration_metric(final_circ))
```
The trace of the optimization can be plotted using matplotlib.
```
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 5))
plt.xlabel("Nb iterations")
plt.ylabel("Duration")
# Trace values are negated durations; negate back for plotting.
# (The loop variable shadows the imported `depth` only inside the comprehension.)
plt.plot(range(len(trace)), [-depth for depth in trace])
plt.show()
```
# Compilation
Before starting compilation, E gates must be replaced by their implementation. The `GraphCircuit` tool will be used to replace `E` gates.
```
from qat.pbo import GraphCircuit
# Init graph circuit from the optimized circuit
theta = VAR()
graph = GraphCircuit()
graph.load_circuit(final_circ)
# Replace every E gate by its CNOT . PH . CNOT implementation
graph.replace_pattern(
    [("E", [0, 1], theta)],
    [("CNOT", [0, 1]), ("PH", [1], theta), ("CNOT", [0, 1])],
    pos=all
)
# Get circuit back from the graph representation
final_circ = graph.to_circ()
```
One wants to compile this optimized circuit on the Rigetti Forest 19Q. Only few gates may be used on this quantum computer. The allowed gates are:
- Gate $R_Z(x)$ for $x \in \mathbb{R}$
- Gate $R_X(x)$ for $x \in \left\{ \pm \pi, \pm \frac{\pi}{2} \right\}$ (these $R_X$ gates are called "compliant $R_X$")
- Gate $CZ$
Since our algorithm does not use these gates, some changes may be defined. `PatternManager` may be used to solve this optimization problem. It is possible to define patterns which must disappear.
- The gate $PH$ must disappear: $PH(x) \rightarrow R_Z(x)$
- The gate $H$ must disappear: $H \rightarrow R_Z \left (\frac{\pi}{2} \right) \cdot R_X \left (\frac{\pi}{2} \right) \cdot R_Z \left (\frac{\pi}{2} \right)$
- The gate $CNOT$ must disappear $CNOT \rightarrow \left(\mathbb{1} \otimes H \right) \cdot CZ \cdot \left(\mathbb{1} \otimes H \right)$
### Groups
**Group 1** Only non-compliant $R_X(x)$ are transformed into $H \cdot R_Z(x) \cdot H$
**Group 2** $PH(x)$ gates are replaced by $R_Z(x)$ gates
**Group 3** $CNOT$ gates are replaced by $(\mathbb{1} \otimes H) \cdot CZ \cdot (\mathbb{1} \otimes H)$
**Group 4** $H$ gates are replaced by $R_Z \left (\frac{\pi}{2} \right) \cdot R_X \left (\frac{\pi}{2} \right) \cdot R_Z \left (\frac{\pi}{2} \right)$
```
from math import pi
# Define a compiler: no metric needed (pure rewriting, no optimization)
compiler = PatternManager()
theta = VAR()
# Group 1: remove non compliant RX gates (any angle not in {+-pi, +-pi/2})
constraint_angle = VAR()
for angle in [pi, -pi, pi/2, -pi/2]:
    constraint_angle.add_prohibited_value(angle)
group_1 = compiler.new_group()
group_1.pattern_to_remove([("RX", [0], constraint_angle)])
group_1.add_pattern([("H", [0]), ("RZ", [0], constraint_angle), ("H", [0])])
# Group 2: remove PH gate (PH(x) -> RZ(x))
group_2 = compiler.new_group()
group_2.pattern_to_remove([("PH", [0], theta)])
group_2.add_pattern([("RZ", [0], theta)])
# Group 3: remove CNOT (CNOT -> (1 x H) . CZ . (1 x H))
group_3 = compiler.new_group()
group_3.pattern_to_remove([("CNOT", [0, 1])])
group_3.add_pattern([("H", [1]), ("CSIGN", [0, 1]), ("H", [1])])
# Group 4: remove H (H -> RZ(pi/2) . RX(pi/2) . RZ(pi/2))
group_4 = compiler.new_group()
group_4.pattern_to_remove([("H", [0])])
group_4.add_pattern([("RZ", [0], pi/2), ("RX", [0], pi/2), ("RZ", [0], pi/2)])
```
The object `compiler` can be used to compile our circuit. Moreover, this object is also a plugin: it can be linked to any QPU.
## Checking compilation
First, a function which print the gate set will be used to check the compilation output:
```
from qat.core.util import extract_syntax
def print_gate_set(circuit):
    """Print the set of distinct (gate name, *parameters) tuples in a circuit."""
    distinct = {
        (name, *params)
        for name, params in (
            extract_syntax(circuit.gateDic[op.gate], circuit.gateDic)
            for op in circuit.ops
        )
    }
    print(distinct)
```
Then, our compiler can compile our circuit using:
- in the first example, RX gates with accepted angles
- in the second example, RX gates with non-accepted angles
```
# Case 1: using RX gates with accepted angles (beta = pi is compliant)
first_circ = compiler.replace_pattern(
    final_circ.bind_variables({r"\alpha": pi/4, r"\beta": pi})
)
print("\nCase 1 with compliant RX")
print_gate_set(first_circ)
# Case 2: using RX gates with non accepted angles (beta = pi/6 triggers group 1)
second_circ = compiler.replace_pattern(
    final_circ.bind_variables({r"\alpha": pi/4, r"\beta": pi/6})
)
print("\nCase 2 with non-compliant RX")
print_gate_set(second_circ)
```
| github_jupyter |
# Simple Reinforcement Learning in Tensorflow Part 1:
## The Multi-armed bandit
This tutorial contains a simple example of how to build a policy-gradient based agent that can solve the multi-armed bandit problem. For more information, see this [Medium post](https://medium.com/@awjuliani/super-simple-reinforcement-learning-tutorial-part-1-fd544fab149).
For more Reinforcement Learning algorithms, including DQN and Model-based learning in Tensorflow, see my Github repo, [DeepRL-Agents](https://github.com/awjuliani/DeepRL-Agents).
```
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
```
### The Bandit
Here we define our bandit. For this example we are using a four-armed bandit. The pullBandit function generates a random number from a normal distribution with a mean of 0. The lower the bandit number, the more likely a positive reward will be returned. We want our agent to learn to always choose the arm that will give that positive reward.
```
#List out our bandit arms.
#Currently arm 4 (index #3) is set to most often provide a positive reward.
#(The values are thresholds: lower threshold -> higher win probability.)
bandit_arms = [0.2,0,-0.2,-2]
num_arms = len(bandit_arms)
def pullBandit(bandit):
    """Pull one arm: sample a standard normal and reward +1 if the draw
    beats the arm's threshold, otherwise -1 (lower thresholds win more)."""
    draw = np.random.randn(1)
    # Positive reward when the draw clears the threshold, else negative.
    return 1 if draw > bandit else -1
```
### The Agent
The code below establishes our simple neural agent. It consists of a set of values for each of the bandit arms. Each value is an estimate of the value of the return from choosing the bandit. We use a policy gradient method to update the agent by moving the value for the selected action toward the received reward.
```
# NOTE(review): this uses the TensorFlow 1.x graph API (placeholders/sessions).
tf.reset_default_graph()
#These two lines establish the feed-forward part of the network.
weights = tf.Variable(tf.ones([num_arms]))
output = tf.nn.softmax(weights)
#The next six lines establish the training procedure. We feed the reward and chosen action into the network
#to compute the loss, and use it to update the network.
reward_holder = tf.placeholder(shape=[1],dtype=tf.float32)
action_holder = tf.placeholder(shape=[1],dtype=tf.int32)
responsible_output = tf.slice(output,action_holder,[1])
loss = -(tf.log(responsible_output)*reward_holder)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
update = optimizer.minimize(loss)
```
### Training the Agent
We will train our agent by taking actions in our environment, and receiving rewards. Using the rewards and actions, we can know how to properly update our network in order to more often choose actions that will yield the highest rewards over time.
```
# NOTE(review): Python 2 `print` statements below -- this cell needs porting
# to print() before it will run under Python 3.
total_episodes = 1000 #Set total number of episodes to train agent on.
total_reward = np.zeros(num_arms) #Set scoreboard for bandit arms to 0.
init = tf.global_variables_initializer()
# Launch the tensorflow graph
with tf.Session() as sess:
    sess.run(init)
    i = 0
    while i < total_episodes:
        #Choose action according to Boltzmann distribution.
        actions = sess.run(output)
        a = np.random.choice(actions,p=actions)
        action = np.argmax(actions == a)
        reward = pullBandit(bandit_arms[action]) #Get our reward from picking one of the bandit arms.
        #Update the network.
        _,resp,ww = sess.run([update,responsible_output,weights], feed_dict={reward_holder:[reward],action_holder:[action]})
        #Update our running tally of scores.
        total_reward[action] += reward
        if i % 50 == 0:
            print "Running reward for the " + str(num_arms) + " arms of the bandit: " + str(total_reward)
        i+=1
print "\nThe agent thinks arm " + str(np.argmax(ww)+1) + " is the most promising...."
if np.argmax(ww) == np.argmax(-np.array(bandit_arms)):
    print "...and it was right!"
else:
    print "...and it was wrong!"
```
| github_jupyter |
# B2: TCA 13C MFA demo
# Intro
# Setup
First, we need to set the path and environment variable properly:
```
# Root of the jQMM/quantmodel checkout; adjust to your installation.
quantmodelDir = '/users/hgmartin/libraries/quantmodel'
```
This is the only place where the jQMM library path needs to be set.
```
%matplotlib inline
import sys, os
pythonPath = quantmodelDir+"/code/core"
if pythonPath not in sys.path:
sys.path.append(pythonPath)
os.environ["QUANTMODELPATH"] = quantmodelDir
%matplotlib inline
import os, core, FluxModels, unittest, enhancedLists
from IPython.display import SVG, Image
```
And move to a scratch directory (to make things faster):
```
cd /scratch/hgmartin_scratch/tests
```
# Assembling SBML file
Get file names needed to produce sbml file:
```
# Input files for the TCA toy model shipped with the library.
qmodeldir = os.environ['QUANTMODELPATH']
dirDATA = qmodeldir+'/data/tests/TCAtoy/'
REACTIONSfilename = dirDATA+'REACTIONStca.txt'
FEEDfilename = dirDATA+'FEEDtca.txt'
CEMSfilename = dirDATA+'GCMStca.txt'
CEMSSTDfilename = dirDATA+'GCMSerrtca.txt'
FLUXESFreefilename = dirDATA+'FLUXtca.txt'
```
Assemble sbml file:
```
# Assemble the reaction network (atom transitions + labeling data +
# feed + flux bounds) and write it out as an SBML file.
atomTransitions = enhancedLists.AtomTransitionList(REACTIONSfilename)
ReacNet = atomTransitions.getReactionNetwork('E. coli wt5h 13C MFA')
ReacNet.addLabeling(CEMSfilename,'LCMSLabelData',CEMSSTDfilename,minSTD=0.001)
ReacNet.addFeed(FEEDfilename)
ReacNet.loadFluxBounds(FLUXESFreefilename)
ReacNet.write('TCA.sbml')
```
# Flux calculation
Load sbml file into model:
```
# Load the assembled SBML file into a 13C MFA model.
C13model = FluxModels.C13Model('TCA.sbml')
```
Calculate fluxes:
```
%%time
# Fit fluxes with standard deviations from 10 replicate optimizations.
results = C13model.findFluxesStds(Nrep=10, procString='proc', erase=False)
```
Plot results:
```
# NOTE(review): Python 2 print statements; also brief="False" passes the
# string "False" (truthy), not the boolean -- confirm intent.
results.plotExpvsCompLabelFragment(titleFig='test',save="fitTest.eps")
print 'Fit:' +str(results.EMUlabel['Glu'])
print 'Exp:' +str(results.fragDict['Glu'].mdv)
results.reactionNetwork.reactionList.printFluxes(brief="False")
```
# Comparison with 13CFLUX2
We now compare these results with those obtained through 13CFLUX2, a well known package for $^{13}$C Metabolic Flux Analysis. The results of this calculation are equivalent. The commands to run this simulation (files included in library in the TCA example folder, 13CFLUX2 license required) are:
./fwdsim.py -i Ecoli/TCAexample.fml
And the results are equivalent to those above:
```
# Display the reference figure produced by 13CFLUX2 for comparison.
Image(os.environ["QUANTMODELPATH"]+"/data/tests/TCAtoy/13CFLUX2/OutputFig.png")
```
fit: [0.34635 0.26953 0.27083 0.08072 0.02864 0.03906]
# References
Weitzel, Michael, et al. "13CFLUX2—high-performance software suite for 13C-metabolic flux analysis." Bioinformatics 29.1 (2013): 143-145.
APA
| github_jupyter |
```
import pandas as pd
# Load the per-song member-credit table (lyricist/composer/arranger columns).
com2 = pd.read_csv('artist_m_extracted.csv')
com2.shape
com4 = com2.copy()
# Strip the str(list) artifacts (quotes and square brackets) from each
# member-credit column so they hold plain comma-separated names.
# DRYed: the original repeated the same three-replace chain per column.
for col in ('lyricist_m', 'composer_m', 'arranger_m'):
    com4[col] = (com4[col].str.replace("'", '', regex=False)
                          .str.replace("[", '', regex=False)
                          .str.replace("]", '', regex=False))
# Extract a composition-only dataframe; drop all songs no member composed.
compose = com4[(com4['composer_m'].str.len() != 0)]
compose = compose[compose.composer_m.notnull()]
compose.tail()
len(compose)
compose.gender.value_counts()
# Extract a lyrics-only dataframe; drop all songs no member wrote lyrics for.
# com4['lyricist_m'] = com4['lyricist_m'].str.replace("'", "", regex=False)
lyricist = com4[(com4['lyricist_m'].str.len() != 0)]
lyricist = lyricist[lyricist.lyricist_m.notnull()]
lyricist[lyricist.artist == '레드벨벳']
len(lyricist)
lyricist.gender.value_counts()
# Extract an arrangement-only dataframe; drop songs with no member arranger.
arrange = com4[(com4['arranger_m'].str.len() != 0)]
arrange = arrange[arrange.arranger_m.notnull()]
arrange.head()
len(arrange)
arrange.gender.value_counts()
arrange[arrange.gender == '남']
# View credit activity over time; first keep only the release year.
def leave_four(df):
    """Reduce df.release_date to its 4-digit year, mutating df in place.

    The column is stringified first, dots are stripped, and the value is
    truncated to the first four characters. Missing values therefore
    become string tokens ('nan'/'None') rather than being removed.
    """
    df.release_date = df.release_date.astype(str)
    df.release_date = df.release_date.str.replace('.', '', regex=False)
    df.release_date = df['release_date'].str[:4]
    # Bug fix: the original ended with `df = df[df.release_date.notnull()]`,
    # which only rebound the local name and never affected the caller's
    # frame (and after astype(str) no value is null anyway); the dead
    # statement has been removed.
# Truncate release dates to years on all three credit frames.
leave_four(lyricist)
leave_four(compose)
leave_four(arrange)
l_time = pd.DataFrame(lyricist.groupby(['gender', 'release_date']).count()).lyricist_m
l_time
c_time = pd.DataFrame(compose.groupby(['gender', 'release_date']).count()).lyricist_m
c_time
# Sharp jump around 2014-2015 -- why? Which artists drove it?
compose[compose.release_date == '2018'].groupby(['gender', 'artist']).count().composer_m
a_time = pd.DataFrame(arrange.groupby(['gender', 'release_date']).count()).lyricist_m
a_time
### 참여도 순위 보기
```
# Top 20 artists by number of songs with member lyric-writing credits.
test = pd.DataFrame(lyricist.groupby(['gender', 'artist']).lyricist_m.count().sort_values(ascending=False))
test.head(20)
# Top 20 artists by number of songs with member composition credits.
test = pd.DataFrame(compose.groupby(['gender', 'artist']).composer_m.count().sort_values(ascending=False))
test.head(20)
# Top 20 artists by number of songs with member arrangement credits.
test = pd.DataFrame(arrange.groupby(['gender', 'artist']).arranger_m.count().sort_values(ascending=False))
test.head(20)
### 방탄소년단~
```
# BTS subset of the lyricist frame.
btsl = lyricist[lyricist.artist == '방탄소년단']
artist_info = pd.read_csv('C:/Users/pje17/Desktop/Lyricsis/M5_Idol_lyrics/Data/Data20180921/artist_info_combined_ver04.csv', encoding='euc-kr')
artist_info.head()
# Normalize the member list column: drop spaces/parentheses, split on commas.
artist_info['artist_m'] = artist_info['artist_m'].str.replace(' ', '').str.replace(r'\(.*?\)','').str.replace(')', '', regex=False).str.split(',')
artist_info[artist_info.artist=='방탄소년단'].artist_m
bts_mem = artist_info[artist_info.artist=='방탄소년단'].artist_m
bts_mem.str[1]
btsl
len(btsl[btsl.lyricist_m.str.contains('j-hope')])
bts_members = ['RM', 'SUGA', '진', 'j-hope', '지민', 'V', '정국']
# NOTE(review): btsl is the lyricist-filtered frame, yet composer credits
# are counted on it below -- confirm this is intentional.
for members in bts_members:
    num = len(btsl[btsl.composer_m.str.contains(members)])
    print(members, num)
l_count = []
c_count = []
a_count = []
for members in bts_members:
    l_count.append(len(btsl[btsl.lyricist_m.str.contains(members)]))
    c_count.append(len(btsl[btsl.composer_m.str.contains(members)]))
#    a_count.append(len(btsl[btsl.arranger_m.str.contains(members)]))
bts_df = pd.DataFrame({'member':bts_members, 'lyrics':l_count, 'compose': c_count})
bts_df
# 가수별로 보는 과정을 함수로 만들기
def member_participation(artist):
    """Print (and return) per-member lyricist/composer participation counts for *artist*.

    Looks up the group's member list in the module-level ``artist_info`` frame,
    then counts, for each member, how many of the group's songs credit them as
    lyricist or composer in the module-level ``lyricist`` frame.

    Returns the resulting DataFrame (previously it was only printed), so the
    counts can be reused programmatically; printing is kept for compatibility.
    """
    members = artist_info[artist_info.artist == artist].artist_m.reset_index()
    songs = lyricist[lyricist.artist == artist]
    lyric_counts = []
    compose_counts = []
    for member in members.artist_m[0]:
        lyric_counts.append(len(songs[songs.lyricist_m.str.contains(member)]))
        compose_counts.append(len(songs[songs.composer_m.str.contains(member)]))
    df = pd.DataFrame({'member': members.artist_m[0],
                       'lyrics': lyric_counts,
                       'compose': compose_counts})
    print(df)
    return df
member_participation('블랙핑크')
# 세븐틴 보기
st_mem = artist_info[artist_info.artist=='세븐틴'].artist_m.reset_index()
st_mem
st_mem.artist_m[0]
stl = compose[compose.artist == '마마무']
for members in st_mem.artist_m[0]:
num = len(stl[stl.composer_m.str.contains(members)])
print(members, num)
for members in st_mem.artist_m[0]:
num = len(stl[stl.lyricist_m.str.contains(members)])
print(members, num)
```
### 년도별 가장 인기가 좋았던 아이돌은?
```
# 일반적인 수치를 위한 곡 정보 뽑기
com = pd.read_csv('C:/Users/pje17/Desktop/Lyricsis/M5_Idol_lyrics/SongTidy/FinalTidy/tidydata/song_tidy03.csv')
a_info = pd.read_csv('M5_Idol_lyrics/Data/Data20180921/artist_info_combined_ver04.csv', encoding='euc-kr')
com = pd.merge(com, a_info, on='artist', left_index=False)
com.head()
import matplotlib.pyplot as plt
com.shape
com.gender.unique()
# 남녀 곡수 비교
com.groupby(by='gender').title.count()
# 남녀 그룹 수 비교
artist_info.groupby(by='gender').artist.count()
# 멜론에서 가장 팬이 많은 가수
artist_info.sort_values(by='fan', ascending=False)[['artist', 'fan']].reset_index().head(30)
release = com.copy()
# 년도별 곡수를 보기 위해 발매년도의 맨 앞 4개의 digit만 살린다.
release.release_date = release.release_date.astype(str)
release.release_date = release.release_date.str.replace('.', '', regex=False)
release.release_date = release['release_date'].str[:4]
release = release[release.release_date.notnull()]
release.shape
release[release.title =='너의결혼식']
date = release.groupby(by='release_date').title.count().reset_index()
date = date.drop(0)
date
date.release_date = date.release_date.astype(int)
date.plot.line(x='release_date', y= 'title')
```
년도별 인기가 가장 좋았던 아이돌은 누구일까? 이를 알기 위해선 해당 년도의 곡의 인기도를 분석해보고자한다. release_date로 묶은 뒤 인기도로 sort_values를 해주자.
```
release = release[release.release_date != '-']
year_top = release[['title', 'artist', 'like', 'release_date']]
year_top.head()
# 좋아요 개수가 int가 아니기 때문에 먼저 쉼표를 날리고 인트로 변경
year_top.like = year_top.like.str.replace(',', '', regex=False).astype(int)
year_top.head()
year_top.release_date = year_top.release_date.astype(int)
year_top.info()
date_top = release[['title', 'artist', 'like', 'release_date']].groupby(by='release_date').apply(lambda x: x['like'].sort_values(ascending=False)).reset_index()
# apply(lambda x: x['time'].sort_values()).reset_index()
year_top.sort_values(by='like', ascending=False).groupby('release_date').head(1).sort_values(by='release_date', ascending=True)
# ['like'].rank(ascending=False)
artist_best = year_top.sort_values(by='like', ascending=False).groupby('artist').head(1).sort_values(by='artist', ascending=True).reset_index()
artist_best[artist_best.artist.isin(['레드벨벳', '방탄소년단'])]
# 년도별 데뷔 그룹 수
# artist_info.sort_values(by='fan', ascending=False)[['artist', 'fan']].reset_index().head(20)
artist_info.groupby('debut_y').count().artist.reset_index()
```
### d3 시각화를 위한 전처리 및 파일 추출
```
# 멜론에서 가장 팬이 많은 가수
top30 = artist_info.sort_values(by='fan', ascending=False)[['artist', 'fan', 'gender']].reset_index().head(30)
top30
top30 = top30.drop(top30.columns[0], axis=1)
top30
# 가수별 곡 수 카운트
songs = com.copy()
songs.head(1)
songartist = songs.groupby(by='artist').title.count().reset_index()
songartist.head()
top30songs = pd.merge(top30, songartist, on='artist')
top30songs
# top30 가수별 최고 인기곡은?
artist_best[artist_best.artist.isin(top30songs.artist)]
# 빅스 1위 곡은 함께 부른 노래기 때문에 제외
vixx = year_top.sort_values(by='like', ascending=False).groupby('artist').head(5)
vixx[vixx.artist == '에프엑스']
```
| github_jupyter |
## Largest Product of Three from List
Given a list of integers, return the largest product that can be made by multiplying any three integers. For example, if the list is [-10,-10,5,2] you should return 500. You can assume that the list has at least three integers.
```
# If the list is all positive, then it's trivial. Just sort and then multiply
import numpy as np
lst = [np.random.randint(1,30) for i in range(50)]
print(lst)
# Could use number_list.sort() if you want without assigning to a new_list
def product_naive(number_list):
    """Return the product of the three largest values in *number_list*.

    Assumes all values are non-negative; with negatives present this can
    miss the true maximum product (see product_three below).
    """
    top_three = sorted(number_list, reverse=True)[:3]
    result = 1
    for value in top_three:
        result *= value
    return result
product_naive(lst) # nice works
```
Now what can we do if there are negative numbers? Look at both ends of a sorted list: since two negatives multiply to a positive, the largest product of three values must be either the three largest numbers, or the two smallest (most negative) numbers multiplied by the single largest one.
```
def product_three(number_list):
    """Return the largest product of any three integers in *number_list*.

    Only two candidates can win once the list is sorted descending:
    the three largest values, or the largest value times the two
    smallest (most negative) values, whose product is positive.
    """
    ordered = sorted(number_list, reverse=True)
    three_largest = ordered[0] * ordered[1] * ordered[2]
    largest_with_two_smallest = ordered[0] * ordered[-1] * ordered[-2]
    return max(three_largest, largest_with_two_smallest)
product_three(lst)
if product_three(lst) == product_naive(lst):
print("TRUE")
example = [-10,-10,5,2]
product_three(example)
```
### This is kind of unsatisfying because it abuses the fact that we have three numbers.
And so it might not generalize well outside ...
### This also runs in O(N log N) time since we have to sort the input array... yikes
To do this faster we can track the three largest and two smallest values in a single O(N) pass — no sorting needed. We use `math.inf` from the math library as the initial sentinel values.
```
from math import inf
def maximum_product(lst):
    """Return the largest product of any three numbers in *lst*, in one O(n) pass.

    Tracks the three largest values (descending) and the two smallest
    (ascending); the answer is either the product of the three largest,
    or the largest times the two smallest (two negatives give a positive).
    """
    top = [-inf, -inf, -inf]   # three largest seen so far, descending
    bottom = [inf, inf]        # two smallest seen so far, ascending
    for value in lst:
        if value > top[0]:
            top = [value, top[0], top[1]]
        elif value > top[1]:
            top = [top[0], value, top[1]]
        elif value > top[2]:
            top = [top[0], top[1], value]
        if value < bottom[0]:
            bottom = [value, bottom[0]]
        elif value < bottom[1]:
            bottom = [bottom[0], value]
    return max(top[0] * top[1] * top[2], top[0] * bottom[0] * bottom[1])
```
| github_jupyter |
# 1.PaddleGAN实现精准唇形合成-- 物理学界大佬们再次合唱
## 1.1 宋代著名诗人苏轼「动起来」的秘密
坐拥百万粉丝的**独立艺术家大谷Spitzer老师**利用深度学习技术使**宋代诗人苏轼活过来,穿越千年,为屏幕前的你们亲自朗诵其著名古诗~** [点击量](https://www.bilibili.com/video/BV1mt4y1z7W8)近百万,同时激起百万网友热议,到底是什么技术这么牛气?

## 1.2 PaddleGAN的唇形迁移能力--Wav2lip
**铛铛铛!!飞桨[PaddleGAN](https://github.com/PaddlePaddle/PaddleGAN)这就来给大家揭秘,手把手教大家如何实现唇型的迁移,学习过本项目的你们,从此不仅能让苏轼念诗,还能让蒙娜丽莎播新闻、新闻主播唱Rap... 只有你想不到的,没有[PaddleGAN](https://github.com/PaddlePaddle/PaddleGAN)做不到的!**
本教程是基于[PaddleGAN](https://github.com/PaddlePaddle/PaddleGAN)实现的视频唇形同步模型**Wav2lip**, 它实现了人物口型与输入语音同步,俗称「对口型」。 比如这样:

**不仅仅让静态图像会「说话」,Wav2lip还可以直接将动态的视频,进行唇形转换,输出与目标语音相匹配的视频,自制视频配音不是梦!**
本次教程包含四个部分:
- Wav2lip原理讲解
- 下载PaddleGAN代码
- 唇形动作合成命令使用说明
- 成果展示
**若是大家喜欢这个教程,欢迎到[Github PaddleGAN主页](https://github.com/PaddlePaddle/PaddleGAN)点击star呀!下面就让我们一起动手实现吧!**
<div align='center'>
<img src='https://ai-studio-static-online.cdn.bcebos.com/47cea097a0284dd39fc2804a53aa8ee6dad16ffe104641258046eb05af49cd64' width='1000'/>
</div>
## 1.3 Wav2lip模型原理
Wav2lip实现唇形与语音精准同步突破的关键在于,它采用了**唇形同步判别器,以强制生成器持续产生准确而逼真的唇部运动。**
此外,该研究通过在鉴别器中,使用**多个连续帧而不是单个帧,并使用视觉质量损失(而不仅仅是对比损失)来考虑时间相关性,从而改善了视觉质量。**
该wav2lip模型几乎是**万能**的,适用于任何**人脸**、**任何语音**、**任何语言**,对任意视频都能达到很高的准确率,可以无缝地与原始视频融合,还可以用于**转换动画人脸,并且导入合成语音**也是可行的
## 1.4 下载PaddleGAN代码
```
# 从github上克隆PaddleGAN代码(如下载速度过慢,可用gitee源)
#!git clone https://github.com/PaddlePaddle/PaddleGAN
%cd /home/aistudio/
# 如果已经存在PaddleGAN仓库,可注释下面两行代码,或者从下面安装所需安装包开始
!git clone https://gitee.com/paddlepaddle/PaddleGAN.git
!git checkout develop
# 安装所需安装包
%cd /home/aistudio/PaddleGAN
!pip install -r requirements.txt
!pip install imageio-ffmpeg
%cd applications/
```
## 1.5 唇形动作合成命令使用说明
重点来啦!!本项目支持大家上传自己准备的视频和音频, 合成任意想要的**逼真的配音视频**!!
只需在如下命令中的**face参数**和**audio参数**分别换成自己的视频和音频路径,然后运行如下命令,就可以生成和音频同步的视频。
程序运行完成后,会在当前文件夹下生成文件名为**outfile**参数指定的视频文件,该文件即为和音频同步的视频文件。本项目中提供了demo展示所用到的视频和音频文件。具体的参数使用说明如下:
- face: 原始视频,视频中的人物的唇形将根据音频进行唇形合成--通俗来说,想让谁说话
- audio:驱动唇形合成的音频,视频中的人物将根据此音频进行唇形合成--通俗来说,想让这个人说什么
```
# 视频保存地址:/home/aistudio/work/pp_put.mp4
# 时间10min左右
!export PYTHONPATH=$PYTHONPATH:/home/aistudio/PaddleGAN && python -u tools/wav2lip.py --face /home/aistudio/work/1.mp4 --audio /home/aistudio/work/1.m4a --outfile /home/aistudio/work/pp_put.mp4
```
## 1.6 效果展示
效果不是特别好,以后会改进本项目

## 1.7 总结
**首先帮大家总结一波:让图片会说话、视频花式配音的魔法--Wav2lip的使用只用三步**:
1. 安装Paddle环境并下载[PaddleGAN](https://github.com/PaddlePaddle/PaddleGAN)
2. 选择想要「配音/对口型」的对象以及音频内容
3. 运行代码并保存制作完成的对口型视频分享惊艳众人
贴心的送上项目传送门:[PaddleGAN](https://github.com/PaddlePaddle/PaddleGAN) 记得点Star关注噢~~
<div align='left'>
<img src='https://ai-studio-static-online.cdn.bcebos.com/c7e2bcd255574e32b10061e0c4a1003a244bb7bd60ad43d394b23183f7390175' width='300'/>
</div>
## 1.8 除了嘴型同步,PaddleGAN还有哪些魔法?
PaddleGAN是只能做「对口型」的应用么?NONONO!当然不是!!
<div align='center'>
<img src='https://ai-studio-static-online.cdn.bcebos.com/f3b7e65df22a4e0fb771db150886dfd93ff602ebf8374fe0bf20e2083f5b1213' width='100'/>
</div>
接下来就给大家展示下PaddleGAN另外的花式应用,如各类**图形影像生成、处理能力**。
**人脸属性编辑能力**能够在人脸识别和人脸生成基础上,操纵面部图像的单个或多个属性,实现换妆、变老、变年轻、变换性别、发色等,一键换脸成为可能;
**动作迁移**,能够实现肢体动作变换、人脸表情动作迁移等等等等。
强烈鼓励大家玩起来,激发PaddleGAN的潜能!
<div align='center'>
<img src='https://ai-studio-static-online.cdn.bcebos.com/461d1f34cf5242fca07d4e333e41f51c099a96017e324531b575a775d0679fc6' width='700'/>
</div>
<div align='center'>
<img src='https://ai-studio-static-online.cdn.bcebos.com/7d2cc83c689c474e8f3c0fa85e58e12b9885b47333d94d4dba4c66e622acf47e' width='700'/>
</div>
欢迎加入官方QQ群(1058398620)与各路技术高手交流~~
<div align='center'>
<img src='https://ai-studio-static-online.cdn.bcebos.com/eb4d10d066c547f19cb373eb72458b12703e1c5b2ea34457b225d958925c2c83' width='250' height='300'/>
</div>
# 2.参考资料
【PaddleGAN的Github地址】:https://github.com/PaddlePaddle/PaddleGAN
【PaddleGAN的Gitee地址】:https://gitee.com/PaddlePaddle/PaddleGAN
【生成对抗网络七日打卡营】课程链接:https://aistudio.baidu.com/aistudio/course/introduce/16651
【生成对抗网络七日打卡营】项目合集:https://aistudio.baidu.com/aistudio/projectdetail/1807841
【图像分割7日打卡营常见问题汇总】
https://aistudio.baidu.com/aistudio/projectdetail/1100155
【PaddlePaddle使用教程】
https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/index_cn.html
【本地安装PaddlePaddle的常见错误】
https://aistudio.baidu.com/aistudio/projectdetail/697227
【API文档】
https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/index_cn.html
【PaddlePaddle/hapi Github】
https://github.com/PaddlePaddle/hapi
【Github使用】
https://guides.github.com/activities/hello-world/
# 3.个人介绍
> 中南大学 机电工程学院 机械工程专业 2019级 研究生 雷钢
> 百度飞桨官方帮帮团成员
> Github地址:https://github.com/leigangblog
> B站:https://space.bilibili.com/53420969
来AI Studio互关吧,等你哦~ https://aistudio.baidu.com/aistudio/personalcenter/thirdview/118783
欢迎大家fork喜欢评论三连,感兴趣的朋友也可互相关注一下啊~
| github_jupyter |
```
import pandas as pd
import datetime as dt
from pathlib import Path
import json
print("Importing Complete")
# Let's take a look at the past sp500 tickers.
def get_sp500_constituents_records(filepath):
    """Load the SP500 constituents history from *filepath*.

    Args:
        filepath: path to the CSV of SP500 constituents records.

    Returns:
        A pandas DataFrame indexed by 'date', or None when the file does
        not exist (a message is printed in that case).
    """
    records_path = Path(filepath)
    if not records_path.is_file():
        # Guard clause: missing file -> report and bail out with None.
        print('Could not find SP500 Constituents Records')
        return
    return pd.read_csv(filepath, index_col='date')
filepath = 'p1inputs/S&P 500 Historical Components & Changes.csv'
sp500_constituents_records = get_sp500_constituents_records(filepath)
display(sp500_constituents_records.head())
# Since the file was imported from a CSV, we will need to change the tickers column to lists and the dates to datetimes, respectively.
# This will allow us to operate on the index and tickers using python operations.
def format_sp500_constituents_records(df):
    """Convert the raw CSV columns into usable types.

    Splits each comma-joined ticker string into a sorted list, and
    converts the string date index into a DatetimeIndex so pandas
    date slicing works on it.
    """
    df['tickers'] = df['tickers'].map(lambda joined: sorted(joined.split(',')))
    df.index = pd.to_datetime(df.index, format='%Y-%m-%d')
    return df
sp500_constituents_records = format_sp500_constituents_records(sp500_constituents_records)
# We only need the past 15 years of data, so let's remove the rows that we don't need from the dataframe.
def slice_sp500_constituents_records(df, start_date, end_date):
    """Return the rows of *df* whose dates fall within [start_date, end_date].

    Both bounds are 'YYYY-MM-DD' strings; they are parsed to datetimes so
    pandas can compare them against the DatetimeIndex. The slice is inclusive.
    """
    start, end = (dt.datetime.strptime(d, '%Y-%m-%d') for d in (start_date, end_date))
    return df.loc[start:end]
start_date = '2007-01-01' # Change your dates as needed for your strategy.
end_date = '2022-01-16'
sp500_changes = slice_sp500_constituents_records(sp500_constituents_records, start_date, end_date)
display(sp500_changes.head(5)) # Double check that the start and end dates were sliced correctly.
display(sp500_changes.tail(5))
# Finally lets collect all the tickers that were in the SP500 from our date range.
def collect_all_sp500_constituents(df):
    """Return an alphabetically sorted list of every ticker appearing in df['tickers'].

    Each row of df['tickers'] is a list of tickers; the union over all rows
    deduplicates tickers that stayed in the index across multiple dates.
    """
    tickers = set()
    for constituents in df['tickers']:
        tickers.update(constituents)
    return sorted(tickers)
all_sp500_constituents = collect_all_sp500_constituents(sp500_changes)
print(f"There were {len(all_sp500_constituents)} total SP500 constituents between {start_date} to {end_date}")
# We will save both all_sp500_constituents and date_ranged_sp500_constituents as jsons.
# sp500_constituents will be all the tickers that need to be download from yahoo finance data to form our 15yr sp500 database.
# date_ranged_sp500_constituents will be used to control which tickers the backtester sees as it trades through the years.
all_sp500_constituents_filepath = 'S&P500 Consitutents 20070101-20220116.json' # Include your filepath here.
with open(all_sp500_constituents_filepath, 'w', encoding = 'utf-8') as f: # Use json.dump to save a list as a json.
json.dump(all_sp500_constituents, f, ensure_ascii=False, indent=4)
sp500_changes_filepath = 'S&P500 Changes 20070101-20220116.json' # Include your filepath here
sp500_changes.to_json(sp500_changes_filepath) # Use to_json to save a pandas dataframe as a json.
```
| github_jupyter |
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import ram
tf.enable_eager_execution()
train = ram.dataset.train('~/data/mnist/')
batch_size = 5
ram_model = ram.RAM(batch_size=batch_size)
optimizer = tf.train.MomentumOptimizer(momentum=0.9, learning_rate=0.001)
batch = train.batch(batch_size)
img, lbl = next(iter(batch))
# training loop
out_t_minus_1 = ram_model.reset()
locs = []
mus = []
log_pis = []
baselines = []
with tf.GradientTape(persistent=True) as tape:
for t in range(ram_model.glimpses):
out = ram_model.step(img, out_t_minus_1.l_t, out_t_minus_1.h_t)
locs.append(out.l_t)
mus.append(out.mu)
baselines.append(out.b_t)
# determine probability of choosing location l_t, given
# distribution parameterized by mu (output of location network)
# and the constant standard deviation specified as a parameter.
# Assume both dimensions are independent
# 1. we get log probability from pdf for each dimension
# 2. we want the joint distribution which is the product of the pdfs
# 3. so we sum the log prob, since log(p(x) * p(y)) = log(p(x)) + log(p(y))
mu_distrib = tf.distributions.Normal(loc=out.mu,
scale=ram_model.loc_std)
log_pi = mu_distrib.log_prob(value=out.l_t)
log_pi = tf.reduce_sum(log_pi, axis=1)
log_pis.append(log_pi)
out_t_minus_1 = out
# convert lists to tensors, reshape to (batch size x number of glimpses)
# for calculations below
baselines = tf.stack(baselines)
baselines = tf.squeeze(baselines)
baselines = tf.transpose(baselines, perm=[1, 0])
log_pis = tf.stack(log_pis)
log_pis = tf.squeeze(log_pis)
log_pis = tf.transpose(log_pis, perm=[1, 0])
# repeat column vector n times where n = glimpses
# calculate reward
predicted = tf.argmax(out.a_t, axis=1, output_type=tf.int32) # a_t = predictions from last time step
R = tf.equal(predicted, lbl)
R = tf.cast(R, dtype=tf.float32)
# reshape reward to (batch size x number of glimpses)
R = tf.expand_dims(R, axis=1) # add axis
R = tf.tile(R, tf.constant([1, ram_model.glimpses]))
# compute losses for differentiable modules
loss_action = tf.losses.softmax_cross_entropy(tf.one_hot(lbl, depth=ram_model.num_classes), out.a_t)
loss_baseline = tf.losses.mean_squared_error(baselines, R)
# compute loss for REINFORCE algorithm
# summed over timesteps and averaged across batch
adjusted_reward = R - baselines
loss_reinforce = tf.reduce_sum((-log_pis * adjusted_reward), axis=1)
loss_reinforce = tf.reduce_mean(loss_reinforce)
# sum up into hybrid loss
hybrid_loss = loss_action + loss_baseline + loss_reinforce
# apply reinforce loss **only** to location network and baseline network
lt_bt_params = [var for net in [ram_model.location_network,
ram_model.baseline]
for var in net.variables]
reinforce_grads = tape.gradient(loss_reinforce, lt_bt_params)
optimizer.apply_gradients(zip(reinforce_grads, lt_bt_params),
global_step=tf.train.get_or_create_global_step())
# apply hybrid loss to glimpse network, core network, and action network
params = [var for net in [ram_model.glimpse_network,
ram_model.action_network,
ram_model.core_network]
for var in net.variables]
hybrid_grads = tape.gradient(hybrid_loss, params)
optimizer.apply_gradients(zip(hybrid_grads, params),
global_step=tf.train.get_or_create_global_step())
```
| github_jupyter |
```
import json
import numpy as np
import pandas as pd
from sklearn.feature_extraction import text
from sklearn.linear_model import LogisticRegression
import sklearn.model_selection as modsel
import sklearn.preprocessing as preproc
```
## Load and prep Yelp reviews data
```
## Load Yelp Business data
biz_f = open('data/yelp/v6/yelp_dataset_challenge_academic_dataset/yelp_academic_dataset_business.json')
biz_df = pd.DataFrame([json.loads(x) for x in biz_f.readlines()])
biz_f.close()
## Load Yelp Reviews data
review_file = open('data/yelp/v6/yelp_dataset_challenge_academic_dataset/yelp_academic_dataset_review.json')
review_df = pd.DataFrame([json.loads(x) for x in review_file.readlines()])
review_file.close()
biz_df.shape
review_df.shape
# Pull out only Nightlife and Restaurants businesses
two_biz = biz_df[biz_df.apply(lambda x: 'Nightlife' in x['categories']
or 'Restaurants' in x['categories'],
axis=1)]
two_biz.shape
biz_df.shape
## Join with the reviews to get all reviews on the two types of business
twobiz_reviews = two_biz.merge(review_df, on='business_id', how='inner')
twobiz_reviews.shape
twobiz_reviews.to_pickle('data/yelp/v6/yelp_dataset_challenge_academic_dataset/twobiz_reviews.pkl')
twobiz_reviews = pd.read_pickle('data/yelp/v6/yelp_dataset_challenge_academic_dataset/twobiz_reviews.pkl')
# Trim away the features we won't use
twobiz_reviews = twobiz_reviews[['business_id',
'name',
'stars_y',
'text',
'categories']]
# Create the target column--True for Nightlife businesses, and False otherwise
twobiz_reviews['target'] = twobiz_reviews.apply(lambda x: 'Nightlife' in x['categories'],
axis=1)
## Now pull out each class of reviews separately,
## so we can create class-balanced samples for training
nightlife = twobiz_reviews[twobiz_reviews.apply(lambda x: 'Nightlife' in x['categories'], axis=1)]
restaurants = twobiz_reviews[twobiz_reviews.apply(lambda x: 'Restaurants' in x['categories'], axis=1)]
nightlife.shape
restaurants.shape
nightlife_subset = nightlife.sample(frac=0.1, random_state=123)
restaurant_subset = restaurants.sample(frac=0.021, random_state=123)
nightlife_subset.shape
restaurant_subset.shape
nightlife_subset.to_pickle('data/yelp/v6/yelp_dataset_challenge_academic_dataset/nightlife_subset.pkl')
restaurant_subset.to_pickle('data/yelp/v6/yelp_dataset_challenge_academic_dataset/restaurant_subset.pkl')
nightlife_subset = pd.read_pickle('data/yelp/v6/yelp_dataset_challenge_academic_dataset/nightlife_subset.pkl')
restaurant_subset = pd.read_pickle('data/yelp/v6/yelp_dataset_challenge_academic_dataset/restaurant_subset.pkl')
combined = pd.concat([nightlife_subset, restaurant_subset])
combined['target'] = combined.apply(lambda x: 'Nightlife' in x['categories'],
axis=1)
combined
# Split into training and test data sets
training_data, test_data = modsel.train_test_split(combined,
train_size=0.7,
random_state=123)
training_data.shape
test_data.shape
# Represent the review text as a bag-of-words
bow_transform = text.CountVectorizer()
X_tr_bow = bow_transform.fit_transform(training_data['text'])
len(bow_transform.vocabulary_)
X_tr_bow.shape
X_te_bow = bow_transform.transform(test_data['text'])
y_tr = training_data['target']
y_te = test_data['target']
# Create the tf-idf representation using the bag-of-words matrix
tfidf_trfm = text.TfidfTransformer(norm=None)
X_tr_tfidf = tfidf_trfm.fit_transform(X_tr_bow)
X_te_tfidf = tfidf_trfm.transform(X_te_bow)
X_tr_l2 = preproc.normalize(X_tr_bow, axis=0)
X_te_l2 = preproc.normalize(X_te_bow, axis=0)
```
## Classify with logistic regression
```
def simple_logistic_classify(X_tr, y_tr, X_test, y_test, description, _C=1.0):
    """Fit a logistic-regression classifier and report its test accuracy.

    *description* labels the feature set in the printed score line;
    *_C* is the inverse regularization strength. Returns the fitted model.
    """
    model = LogisticRegression(C=_C)
    model.fit(X_tr, y_tr)
    score = model.score(X_test, y_test)
    print ('Test score with', description, 'features:', score)
    return model
m1 = simple_logistic_classify(X_tr_bow, y_tr, X_te_bow, y_te, 'bow')
m2 = simple_logistic_classify(X_tr_l2, y_tr, X_te_l2, y_te, 'l2-normalized')
m3 = simple_logistic_classify(X_tr_tfidf, y_tr, X_te_tfidf, y_te, 'tf-idf')
```
## Tune regularization parameters using grid search
```
param_grid_ = {'C': [1e-5, 1e-3, 1e-1, 1e0, 1e1, 1e2]}
bow_search = modsel.GridSearchCV(LogisticRegression(), cv=5, param_grid=param_grid_)
l2_search = modsel.GridSearchCV(LogisticRegression(), cv=5,
param_grid=param_grid_)
tfidf_search = modsel.GridSearchCV(LogisticRegression(), cv=5,
param_grid=param_grid_)
bow_search.fit(X_tr_bow, y_tr)
bow_search.best_score_
l2_search.fit(X_tr_l2, y_tr)
l2_search.best_score_
tfidf_search.fit(X_tr_tfidf, y_tr)
tfidf_search.best_score_
bow_search.best_params_
l2_search.best_params_
tfidf_search.best_params_
bow_search.cv_results_
import pickle
results_file = open('tfidf_gridcv_results.pkl', 'wb')
pickle.dump(bow_search, results_file, -1)
pickle.dump(tfidf_search, results_file, -1)
pickle.dump(l2_search, results_file, -1)
results_file.close()
pkl_file = open('tfidf_gridcv_results.pkl', 'rb')
bow_search = pickle.load(pkl_file)
tfidf_search = pickle.load(pkl_file)
l2_search = pickle.load(pkl_file)
pkl_file.close()
search_results = pd.DataFrame.from_dict({'bow': bow_search.cv_results_['mean_test_score'],
'tfidf': tfidf_search.cv_results_['mean_test_score'],
'l2': l2_search.cv_results_['mean_test_score']})
search_results
```
## Plot cross validation results
```
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
ax = sns.boxplot(data=search_results, width=0.4)
ax.set_ylabel('Accuracy', size=14)
ax.tick_params(labelsize=14)
plt.savefig('tfidf_gridcv_results.png')
m1 = simple_logistic_classify(X_tr_bow, y_tr, X_te_bow, y_te, 'bow',
_C=bow_search.best_params_['C'])
m2 = simple_logistic_classify(X_tr_l2, y_tr, X_te_l2, y_te, 'l2-normalized',
_C=l2_search.best_params_['C'])
m3 = simple_logistic_classify(X_tr_tfidf, y_tr, X_te_tfidf, y_te, 'tf-idf',
_C=tfidf_search.best_params_['C'])
bow_search.cv_results_['mean_test_score']
```
| github_jupyter |
```
import seaborn as sns; sns.set(color_codes=True)
tips = sns.load_dataset("tips")
print(tips[:5])
print(len(tips))
ax = sns.regplot(x="total_bill", y="tip", data=tips)
import matplotlib.pyplot as plt
g = sns.FacetGrid(tips, hue="sex", size=6, aspect=2)
g.map(plt.scatter, "total_bill", "tip")
g.add_legend()
import random
import numpy as np
x = np.arange(0,1,0.01)
y = 3*x*x - 2*x + 8
y = y + random.sample(np.arange(-0.2,0.2,0.4/len(y)), len(y))
plt.plot(y, marker='o', linestyle='None')
ax = sns.regplot(x, y, ci=None, truncate=True)
ax = sns.regplot(x, y, order=2, ci=None, truncate=True)
ax = sns.regplot(x, y, order=12, ci=None, truncate=True)
import tensorflow as tf
import pandas as pd
import seaborn as sns;
tips = sns.load_dataset("tips")
tips = tips.sample(frac=1.0)
trainsize = int(len(tips) * 0.8)
df_train = tips[:trainsize]
print(df_train[:5])
# working with numpy arrays works
tf.logging.set_verbosity(tf.logging.INFO)
predictors = df_train.loc[:,['total_bill', 'size']].values # np.ndarray
targets = df_train.iloc[:,1].values
features = tf.contrib.learn.infer_real_valued_columns_from_input(predictors)
model = tf.contrib.learn.LinearRegressor(feature_columns=features)
model.fit(predictors, targets, steps=1000)
arr = [[x for x in np.append(np.zeros(9), 1.0)] for row in range(0,10)]
for row in arr:
np.random.shuffle(row)
print(np.array(arr))
%bash
pip install scikit-image
import skimage
import skimage.io
import skimage.filters
import os
from skimage.color import rgb2gray
desert = rgb2gray(skimage.io.imread('algodones-dunes-1654439_1920.jpg'))
skimage.io.use_plugin('matplotlib', 'imread')
skimage.io.imshow(desert)
edges = np.abs(skimage.filters.sobel_h(desert))
from skimage.filters.rank import maximum
from skimage.morphology import disk
out = 255 - maximum(edges, disk(5))
skimage.io.imshow(out)
smooth = skimage.filters.rank.mean(desert, disk(2))
edges = np.abs(skimage.filters.sobel_h(smooth))
from skimage.filters.rank import maximum
from skimage.morphology import disk
out = 255 - maximum(edges, disk(5))
skimage.io.imshow(out)
```
<h3> Benchmarking early models </h3>
```
import datalab.bigquery as bq
import numpy as np
import pandas as pd
def create_query(phase, EVERY_N):
    """Build the BigQuery SQL selecting taxi-fare features for one data split.

    Args:
        phase: 1 = training split, 2 = validation split.
        EVERY_N: sampling modulus. When None, the whole dataset is split
            train/valid by hash(pickup_datetime) % 4; otherwise only rows
            with hash(pickup_datetime) % EVERY_N == phase are selected.

    Returns:
        The SQL query string, with implausible NYC trips filtered out
        (zero distance, sub-minimum fares, out-of-area coordinates,
        no passengers).
    """
    base_query = """
SELECT
  DAYOFWEEK(pickup_datetime)*1.0 AS dayofweek,
  HOUR(pickup_datetime)*1.0 AS hourofday,
  pickup_longitude, pickup_latitude,
  dropoff_longitude, dropoff_latitude,
  passenger_count*1.0 AS passenger_count,
  (tolls_amount + fare_amount) as fare_amount
FROM
  [nyc-tlc:yellow.trips]
WHERE
  trip_distance > 0
  AND fare_amount >= 2.5
  AND pickup_longitude > -78
  AND pickup_longitude < -70
  AND dropoff_longitude > -78
  AND dropoff_longitude < -70
  AND pickup_latitude > 37
  AND pickup_latitude < 45
  AND dropoff_latitude > 37
  AND dropoff_latitude < 45
  AND passenger_count > 0
"""
    # `is None` (identity) is the idiomatic None test, not `== None`.
    if EVERY_N is None:
        if phase < 2:
            # training: take two of the four hash buckets
            query = "{0} AND ABS(HASH(pickup_datetime)) % 4 < 2".format(base_query)
        else:
            # validation: take the single bucket matching `phase`
            query = "{0} AND ABS(HASH(pickup_datetime)) % 4 == {1}".format(base_query, phase)
    else:
        # subsample: one record in EVERY_N, offset by `phase` so splits don't overlap
        query = "{0} AND ABS(HASH(pickup_datetime)) % {1} == {2}".format(base_query, EVERY_N, phase)
    return query
def distance_between(lat1, lon1, lat2, lon2):
    """Great-circle ("as the crow flies") distance in km between two lat/lon points.

    Uses the spherical law of cosines. The cosine argument is clipped to
    [-1, 1] because floating-point rounding can push it slightly out of
    range (e.g. for identical pickup/dropoff points), which would make
    np.arccos return NaN instead of 0.
    """
    cos_angle = (np.sin(np.radians(lat1)) * np.sin(np.radians(lat2))
                 + np.cos(np.radians(lat1)) * np.cos(np.radians(lat2))
                 * np.cos(np.radians(lon2 - lon1)))
    # degrees -> km: 60 nautical-mile minutes per degree * 1.515 * 1.609344 km/mile
    dist = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0))) * 60 * 1.515 * 1.609344
    return dist
def estimate_distance(df):
    """Crow-flies distance estimate for each trip row, pickup to dropoff."""
    pickup_lat, pickup_lon = df['pickup_latitude'], df['pickup_longitude']
    dropoff_lat, dropoff_lon = df['dropoff_latitude'], df['dropoff_longitude']
    return distance_between(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon)
def compute_rmse(actual, predicted):
    """Root-mean-squared error between two aligned arrays."""
    squared_errors = (actual - predicted) ** 2
    return np.sqrt(np.mean(squared_errors))
def print_rmse(df, rate, name):
    """Print the RMSE of a fixed rate-per-km fare model evaluated on *df*."""
    rmse = compute_rmse(df['fare_amount'], rate * estimate_distance(df))
    print("{1} RMSE = {0}".format(rmse, name))
query = create_query(2, 100000)
df_valid = bq.Query(query).to_dataframe()
print_rmse(df_valid, 2.56, 'Final Validation Set')
%%mlalpha train --cloud
package_uris: gs://cloud-training-demos-ml/taxifare/source4b/taxifare.tar.gz
python_module: trainer.task
scale_tier: BASIC
region: us-central1
args:
train_data_paths: gs://cloud-training-demos-ml/taxifare/taxi_preproc4a/features_train*
eval_data_paths: gs://cloud-training-demos-ml/taxifare/taxi_preproc4a/features_eval*
metadata_path: gs://cloud-training-demos-ml/taxifare/taxi_preproc4a/metadata.yaml
output_path: gs://cloud-training-demos-ml/taxifare/taxi_trained4b/eval
max_steps: 2500
hidden_layer1_size: 147
number_buckets: 19
learning_rate: 0.047
batch_size: 512
%mlalpha jobs --name trainer_task_161012_212122
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
df = pd.DataFrame({'Lab' : pd.Series(['1a', '2c', '3a', '4a', '4b', '4c']),
'Method' : pd.Series(['Heuristic', 'tf.learn', '+ Feature Eng', '+ Hyperparam', '+ 500m rows']),
'RMSE': pd.Series([8.026, 10.344, 6.38, 6.28, 3.86]) })
ax = sns.barplot(data=df, x='Method', y='RMSE')
ax.set_ylabel('RMSE (dollars)')
ax.set_xlabel('CPB102 labs (methods)')
plt.plot(np.linspace(-20,120,1000), [5]*1000, 'b')
```
| github_jupyter |
# Generating CLEAN Results
## Load environment
```
%matplotlib inline
import sys
# Directories and paths
lib_path = '/gpfswork/rech/xdy/uze68md/GitHub/'
data_path = '/gpfswork/rech/xdy/uze68md/data/'
model_dir = '/gpfswork/rech/xdy/uze68md/trained_models/model_cfht/'
# Add library path to PYTHONPATH
path_alphatransform = lib_path+'alpha-transform'
path_score = lib_path+'score'
path_clean = '/gpfswork/rech/xdy/uze68md/GitHub/ShapeDeconv/data/T-RECS/clean/'
sys.path.insert(0, path_alphatransform)
sys.path.insert(0, path_score)
sys.path.insert(0, path_clean)
# Function
def sigma_mad(signal):
    """Estimate the standard deviation of white additive Gaussian noise in *signal*
    using the Median Absolute Deviation (MAD); 1.4826 scales the MAD to be a
    consistent estimator of sigma for Gaussian data.
    INPUT: signal, Numpy Array
    OUTPUT: sigma, scalar"""
    deviations = np.abs(signal - np.median(signal))
    return 1.4826 * np.median(deviations)
# Libraries
from CLEAN3 import doCLEAN,gauss2D
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import fft
import cadmos_lib as cl
import tensorflow as tf
import galsim
from galsim import Image
import galsim.hsm
import galflow as gf
from galaxy2galaxy import problems
import pickle
from cv2 import resize, INTER_AREA
from scipy.signal import convolve
```
## Load Data
```
# Load Data
f = open(data_path+"meerkat_batch.pkl", "rb")
batch = pickle.load(f)
f.close()
# Normalize PSF by the max
batch['psf'] = np.array([psf/np.max(psf) for psf in batch['psf']])
n_batch, Nx, Ny = batch['targets'].shape
```
## Apply CLEAN
```
restored,restorednores,residual,skymodelimg = np.zeros((4,n_batch,Nx,Ny))
skymodellist = []
sigma_flags = np.ones(n_batch,dtype=bool)
for i in range(n_batch):
# running CLEAN
sigma_im = sigma_mad(batch['inputs'][i])
n=3
while np.max(batch['inputs'][i])<n*sigma_im:
n = n-1
sigma_flags[i] = False
if i%100 == 0:
print(i)
print('n :',n)
restored[i],restorednores[i],residual[i],skymodellist_temp,skymodelimg[i]=doCLEAN(batch['inputs'][i],batch['psf'][i],gain=0.1,niter=5000,fthresh=n*sigma_im)
skymodellist += [skymodellist_temp]
```
## Reconstruct with a Gaussian PSF
```
# Generate isotropic Gaussian PSF
# preparing gridding
imsize = 128
x=np.arange(0,imsize)-imsize//2
y=np.arange(0,imsize)-imsize//2
xx,yy=np.meshgrid(x,y)
gauss_iso = gauss2D(xx,yy,1,0,0,2,0.,0.) # gridx, gridy, amp, meanx, meany, std, e1, e2
# normalize Gaussian PSF with the flux of the PSF
gauss_psf = np.array([gauss_iso / gauss_iso.sum() * psf.sum() for psf in batch['psf']])
restored_iso = np.array([convolve(sm,psf,'same') for sm,psf in zip(skymodelimg,gauss_psf)])
```
## Build Result Dictionnary
```
keys = ['restored_residual', 'restored', 'restored_isotropic', 'residual', 'skymodel', 'skymodel_list', 'sigma_flags']
values = [restored,restorednores,restored_iso,residual,skymodelimg,skymodellist,sigma_flags]
results = {}
for key,value in zip(keys,values):
results[key] = value
```
## Save Result Dictionnary
```
f = open(data_path+"clean_results.pkl","wb")
pickle.dump(results,f)
f.close()
```
| github_jupyter |
# Importing the libraries
```
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
```
# Load Data
```
# Load the Online Retail transactions; latin1 decoding because the source
# file contains characters that are not valid UTF-8.
dataset=pd.read_csv('OnlineRetail.csv',encoding='latin1')
# Quick sanity checks: sample rows, numeric summary, dtypes and null counts.
dataset.head()
dataset.describe()
dataset.info()
```
# Data Preprocessing
We are going to analysis the Customers based on below 3 factors:
+ R (Recency): Number of days since last purchase
+ F (Frequency): Number of transactions
+ M (Monetary): Total amount of transactions (revenue contributed)
```
# Add new attribute Amount: per-line revenue = quantity * unit price
# (the "Monetary" ingredient of the RFM analysis).
dataset['Amount']= dataset['Quantity']*dataset['UnitPrice']
dataset.head()
```
Pandas datetimeindex provides efficient way to extract year, month or day from string format date.
docs: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DatetimeIndex.html
You can also use the formula (dataset['InvoiceDate'] - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s') to calculate unixtime
```
# --- Feature engineering: build Recency, Frequency, Monetary per customer ---

# Convert date to unix time (seconds since the epoch) so date arithmetic
# becomes plain integer arithmetic.
dataset['InvoiceDate'] = pd.to_datetime(dataset['InvoiceDate'])
dataset['InvoiceDate'] = (dataset['InvoiceDate'] - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
dataset.head()

# Recency raw material: seconds between each invoice and the most recent
# invoice in the whole dataset.
max_date = max(dataset['InvoiceDate'])
print(max_date)
dataset['timeDiff'] = max_date - dataset['InvoiceDate']
dataset.head()

# Drop columns which are not used for segmentation.
df = dataset.drop(['StockCode', 'Description', 'Quantity', 'UnitPrice', 'InvoiceDate'], axis=1)
df.head()

# Monetary: total revenue per CustomerID.
amt_df = df.groupby('CustomerID')['Amount'].sum()
amt_df.head()

# Frequency: number of invoice lines per CustomerID.
fre_df = df.groupby('CustomerID')['InvoiceNo'].count()
fre_df.head()

# Recency: smallest time difference = most recent transaction per customer.
rec_df = df.groupby('CustomerID')['timeDiff'].min()
rec_df.head()

# Merge the three per-customer series into a single RFM dataframe.
data = pd.merge(amt_df, fre_df, on='CustomerID', how='inner')
data.head()
data = pd.merge(data, rec_df, on='CustomerID', how='inner')
data.head()

# Rename the columns to Frequency and Recency.
data = data.rename(columns={"InvoiceNo": "Frequency", "timeDiff": "Recency"})
data.head()

# Verify the data types.
numerical = [var for var in data.columns if data[var].dtype != 'O']
print('There are {} numerical variables : \n'.format(len(numerical)), numerical)
categorical = [var for var in data.columns if data[var].dtype == 'O']
print('There are {} categorical variables : \n'.format(len(categorical)), categorical)

# View summary statistics of the numerical variables.
# BUG FIX: the original called print(round(x), 2) -- rounding to 0 decimals
# and printing a stray "2". The 2 belongs inside round().
print(round(data[numerical].describe(), 2))

# Check for rows containing NaN.
data[data.isnull().any(axis=1)]
```
# Build Model
```
# Standardize the RFM features to zero mean and unit variance so that no
# single feature dominates the distance-based clustering below.
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaleddata = scaler.fit_transform(data)
```
# K-means clustering
```
from sklearn.cluster import KMeans
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
# Elbow method: run k-means for k in [2, 11], recording WCSS (inertia =
# within-cluster sum of squared distances of points to their centroid) and
# the silhouette score for each k, then plot WCSS vs k.
wcss=[]
for clusterCount in range(2,12):
    kmeans = KMeans(n_clusters=clusterCount, init='k-means++',random_state=42)
    kmeans.fit(scaleddata)
    wcss.append(kmeans.inertia_)#get wcss
    clusterLabels = kmeans.labels_
    silhouette_avg = silhouette_score(scaleddata,clusterLabels)
    print("For n_clusters={0}, the silhouette score is {1}".format(clusterCount, silhouette_avg))
# The "elbow" of this curve suggests a good number of clusters.
plt.plot(range(2,12) # x axis
,wcss) # y axis
plt.title('The Elbow Curve')
plt.xlabel('Number of Clusters')
plt.ylabel('WCSS')
plt.show()
```
The maximum silhouette score is for 2 clusters, but two segments are too coarse to be useful, so we pick the next highest score, which is for 5 clusters.
```
# Fit the final k-means model with 5 clusters.
# n_init=1 means a single random initialization and there is no random_state,
# so the labels can vary between runs; max_iter=100 keeps the fit fast.
cluster = KMeans(n_clusters=5, init='k-means++', max_iter=100, n_init=1)
cluster.fit(scaleddata)
cluster_labels=cluster.labels_
print(cluster_labels)
score = silhouette_score(scaleddata, cluster_labels)
print(score)
# Add cluster labels to preprocessed data
data['kcluster']=cluster_labels
data.head()
# Visualize clusters in the Frequency/Recency plane; point size encodes Amount.
fig, ax = plt.subplots(figsize=(18,18))
sns.scatterplot(ax=ax, data=data, x="Frequency", y="Recency",size="Amount", hue="kcluster", palette="deep")
```
# Hierarchical clustering
```
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster import AgglomerativeClustering
# Connectivity graph (10 nearest neighbours) restricts merges to nearby
# points, which both speeds up and regularizes Ward agglomerative clustering.
connectivity = kneighbors_graph(scaleddata, n_neighbors=10, include_self=False)
model = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
                                linkage='ward')
model = model.fit(scaleddata)
cluster_labels=model.labels_
score = silhouette_score(scaleddata, cluster_labels)
print(score)
# Add cluster labels to preprocessed data
data['hcluster']=cluster_labels
data.head()
fig, ax = plt.subplots(figsize=(18,18))
sns.scatterplot(ax=ax, data=data, x="Frequency", y="Recency",size="Amount", hue="hcluster", palette="deep")
```
# MeanShift clustering
```
from sklearn.cluster import MeanShift
# Mean-shift clustering: no cluster count is chosen up front; bandwidth=2 is
# the kernel radius in standardized feature units and controls granularity.
model = MeanShift(bandwidth=2)
model = model.fit(scaleddata)
cluster_labels=model.labels_
score = silhouette_score(scaleddata, cluster_labels)
print(score)
# Add cluster labels to preprocessed data
data['acluster']=cluster_labels
data.head()
fig, ax = plt.subplots(figsize=(18,18))
sns.scatterplot(ax=ax, data=data, x="Frequency", y="Recency",size="Amount", hue="acluster", palette="deep")
```
# SpectralClustering
```
from sklearn.cluster import SpectralClustering
# Spectral clustering with discretized label assignment; random_state=0
# makes the result reproducible.
model = SpectralClustering(n_clusters=5,assign_labels="discretize", random_state=0)
model = model.fit(scaleddata)
# FIX: the original assigned cluster_labels twice on consecutive lines;
# the duplicate has been removed.
cluster_labels=model.labels_
score = silhouette_score(scaleddata, cluster_labels)
print(score)
# Add cluster labels to preprocessed data
data['scluster']=cluster_labels
data.head()
fig, ax = plt.subplots(figsize=(18,18))
sns.scatterplot(ax=ax, data=data, x="Frequency", y="Recency",size="Amount", hue="scluster", palette="deep")
```
# MiniBatchKMeans Clustering
```
from sklearn.cluster import MiniBatchKMeans
# Mini-batch variant of k-means: trades a little accuracy for much faster
# fits on large data; k=5 matches the full KMeans model fitted earlier.
model = MiniBatchKMeans(n_clusters=5)
model = model.fit(scaleddata)
cluster_labels=model.labels_
score = silhouette_score(scaleddata, cluster_labels)
print(score)
# Add cluster labels to preprocessed data
data['mcluster']=cluster_labels
data.head()
fig, ax = plt.subplots(figsize=(18,18))
sns.scatterplot(ax=ax, data=data, x="Frequency", y="Recency",size="Amount", hue="mcluster", palette="deep")
```
# DBSCAN Clustering
```
from sklearn.cluster import DBSCAN
# Density-based clustering; eps=3 is the neighbourhood radius in
# standardized feature units. Points DBSCAN cannot assign get label -1.
model = DBSCAN(eps=3)
model = model.fit(scaleddata)
cluster_labels=model.labels_
# NOTE(review): silhouette_score raises ValueError if DBSCAN produces fewer
# than two distinct labels -- confirm this holds for this dataset.
score = silhouette_score(scaleddata, cluster_labels)
print(score)
# Add cluster labels to preprocessed data
data['dcluster']=cluster_labels
data.head()
fig, ax = plt.subplots(figsize=(18,18))
sns.scatterplot(ax=ax, data=data, x="Frequency", y="Recency",size="Amount", hue="dcluster", palette="deep")
```
| github_jupyter |
```
import time
import toml
import numpy as np
import matplotlib.pyplot as plt
from ref_trajectory import generate_trajectory as traj
%matplotlib inline
```
There are a lot of configuration parameters. It is a good idea to separate it from the main code. At some point you will be doing parameter tuning.
We will use toml format to store parameters. Open config.toml and go over the description of the different parameters we may use here
Read the config parameters, default output is a dictionary. You have to then store them as local variables.
You have 2 options for this
1. update locals() directly - a little bit dangerous
2. You can use itemgetter
```
# Load tunable parameters from the [params] section of config.toml and
# inject them as module-level variables (dt, V_MAX, ACC_MAX, ...).
config_params = toml.load("config.toml")['params']
print(config_params)
# NOTE(review): updating locals() is only guaranteed to work at module
# scope (as here); inside a function it would silently do nothing.
locals().update(config_params)
print(dt, V_MAX)
```
There are 3 functions we need to write for DWA
1. Simulate unicyle - we will slightly modify it to simulate $N$ steps
2. Command Window - Entire set of acceptable $(v, \omega)$ in that time instant given current $(v, \omega)$
3. track - get $(v, \omega )$ for path with the lowest cost
In the first iteration, let us not integrate collision checking. Let us integrate these pieces, make sure DWA works for a few paths!
```
# Demo: build a grid of candidate (v, w) commands as the cartesian product
# of 11 linear and 11 angular velocity samples.
v_min, v_max = 0.0, 0.2
w_min, w_max = -0.1, 0.1
vs = np.linspace(v_min, v_max, num=11)
ws = np.linspace(w_min, w_max, num=11)
# each row of cmd is one (v, w) pair
cmd = np.transpose([np.tile(vs, len(ws)), np.repeat(ws, len(vs))])
print(vs)
def simulate_unicycle(pose, v, w, N=1, dt=0.1):
    """Roll a unicycle model forward with explicit Euler integration.

    pose -- starting (x, y, heading)
    v, w -- constant linear and angular velocity over the rollout
    N    -- number of dt-sized steps to simulate
    Returns an (N, 3) numpy array of the successive poses (start excluded).
    """
    x, y, heading = pose
    trajectory = []
    step = 0
    while step < N:
        # position advances along the current heading, then heading turns
        x = x + v * np.cos(heading) * dt
        y = y + v * np.sin(heading) * dt
        heading = heading + w * dt
        trajectory.append([x, y, heading])
        step += 1
    return np.array(trajectory)
def command_window(v, w, dt=0.1):
    """Return the dynamic window of admissible (v, w) commands.

    Candidates are the (v, w) pairs reachable from the current (v, w) within
    one timestep dt, subject to the velocity, acceleration and curvature
    limits V_MAX, ACC_MAX, W_MAX, W_DOT_MAX and K_MAX loaded from the config.
    Zero-linear-velocity candidates are discarded.
    """
    # velocity can be (0, V_MAX); ACC_MAX = max linear acceleration
    v_max = min(V_MAX, v + ACC_MAX*dt)
    v_min = max(0, v - ACC_MAX*dt)
    # omega can be (-W_MAX, W_MAX); W_DOT_MAX = max angular acceleration
    epsilon = 1e-6
    w_max = min(W_MAX, w + W_DOT_MAX*dt)
    w_min = max(-W_MAX, w - W_DOT_MAX*dt)
    # generate quantized ranges for v and omega
    vs = np.linspace(v_min, v_max, num=11)
    ws = np.linspace(w_min, w_max, num=21)
    # cartesian product of [vs] and [ws]; zero-velocity entries are
    # discarded by the final mask
    commands = np.transpose([np.tile(vs, len(ws)), np.repeat(ws, len(vs))])
    # curvature of each candidate; epsilon avoids division by zero at v=0
    kappa = commands[:,1]/(commands[:,0]+epsilon)
    # BUG FIX: curvature is signed -- the original compared the signed value
    # (kappa < K_MAX), which let arbitrarily sharp turns in the negative-w
    # direction through. Compare the magnitude instead.
    return commands[(np.abs(kappa) < K_MAX) & (commands[:, 0] != 0)]
def track(ref_path, pose, v, w, dt=0.1):
    """Dynamic-window tracking step.

    Evaluates every admissible (v, w) candidate from command_window, rolls
    the unicycle model forward, and returns the candidate whose rollout
    endpoint lies closest to the end of `ref_path` (traded off against
    driving below V_MAX). Returns [0, 0] when no candidate is admissible.
    """
    commands = command_window(v, w, dt)
    # initialize path cost; best_command stays None if no candidate wins
    best_cost, best_command = np.inf, None
    for v_cand, w_cand in commands:
        # rollout over the prediction horizon (N defaults to 1 step here)
        local_path = simulate_unicycle(pose, v_cand, w_cand)
        #if circle_collision_check(grid, local_path): #ignore colliding paths
        #    print("local path has a collision")
        #    continue
        # Cross-track error: distance from the rollout endpoint to the end
        # of the reference segment.
        # BUG FIX: the original read the *global* `local_ref_path` instead
        # of the `ref_path` parameter, silently coupling this function to
        # the caller's loop variable.
        cte = np.sqrt((local_path[-1][0] - ref_path[-1][0])**2
                      + (local_path[-1][1] - ref_path[-1][1])**2)
        # weighted cost: tracking error plus a penalty for slow candidates
        # (w_cte, w_speed, V_MAX come from the loaded config)
        cost = w_cte*cte + w_speed*(V_MAX - v_cand)**2
        # keep the cheapest candidate seen so far
        if cost < best_cost:
            best_cost, best_command = cost, (v_cand, w_cand)
    # explicit None test: a (0.0, w) tuple is truthy, but be unambiguous
    if best_command is not None:
        return best_command
    else:
        return [0, 0]
grid_res = 0.05  # world units per occupancy-grid cell (used as the metre->pixel divisor)
def circle_collision_check(grid, local_traj):
    """Check a local trajectory against the occupancy grid.

    Returns True as soon as any of the robot's footprint circles overlaps an
    occupied grid cell along the trajectory; False otherwise.
    NOTE(review): relies on a global `circles` (defined elsewhere),
    presumably (x, y, radius) tuples in the robot frame -- confirm.
    """
    xmax, ymax = grid.shape
    all_x = np.arange(xmax)
    all_y = np.arange(ymax)
    X, Y = np.meshgrid(all_x, all_y)
    for xl, yl, tl in local_traj:
        # NOTE(review): [[sin, -cos], [cos, sin]] is a rotation by
        # (pi/2 - tl), not by tl -- possibly intentional if heading is
        # measured from the y-axis; confirm against the path convention.
        rot = np.array([[np.sin(tl), -np.cos(tl)],[np.cos(tl), np.sin(tl)]])
        for xc, yc, rc in circles:
            # footprint circle centre transformed into the world frame
            xc_rot, yc_rot = rot @ np.array([xc, yc]) + np.array([xl, yl])
            # world coordinates -> grid (pixel) coordinates
            xc_pix, yc_pix = int(xc_rot/grid_res), int(yc_rot/ grid_res)
            rc_pix = (rc/ grid_res)
            # boolean mask of grid cells strictly inside this circle
            inside_circle = ((X-xc_pix)**2 +(Y-yc_pix)**2 - rc_pix**2 < 0)
            occupied_pt = grid[X, Y] == 1
            # any occupied cell inside the circle => collision
            if np.sum(np.multiply(inside_circle, occupied_pt)):
                return True
    return False
# --- DWA driver loop: follow the reference route to its end ---
start_pose = np.array([0, 0, np.pi/2])
route = [("straight", 5),("turn", -90),("turn", 90)] if False else [("straight", 5),("turn", -90),("straight", 6),("turn", 90)]
ref_path = traj(route, start_pose).T
pose = [start_pose]
logs = []
path_index = 0
v, w = 0.0, 0.0
while path_index < len(ref_path)-1:
    t0 = time.time()
    # sliding window of the reference path (pred_horizon from the config)
    local_ref_path = ref_path[path_index:path_index+pred_horizon]
    # update path_index using current pose and local_ref_path
    # NOTE(review): pose alternates between a bare [x, y, t] triple and the
    # (1, 3) trajectory returned by simulate_unicycle; pose[0] unwraps it
    # at the top of every iteration.
    pose=pose[0]
    # distance from the current pose to the *final* reference point
    dist = np.sqrt(((pose[0]-ref_path[-1][0])**2 + (pose[1]-ref_path[-1][1])**2))
    # advance along the reference until we are close to the goal
    if dist > goal_threshold*10:
        path_index = path_index+1
    # get next command
    v, w = track(local_ref_path, pose, v, w)
    #simulate vehicle for 1 step
    # remember the function now returns a trajectory, not a single pose
    pose = simulate_unicycle(pose, v, w)
    #update logs; *pose unpacks the single row of the (1, 3) trajectory
    logs.append([*pose, v, w])
    t1 = time.time() #simplest way to time-profile your code
    print(f"idx:{path_index}, v:{v:0.3f}, w:{w:0.3f}, time:{(t1-t0) * 1000:0.1f}ms")
# plot reference path (green dots) against the executed path (red line)
poses = np.array(logs)[:,:3]
plt.figure()
plt.axes().set_aspect('equal', 'datalim')
plt.plot(ref_path[:,0], ref_path[:,1], '.', c='g')
plt.plot(poses[:,0], poses[:,1], c='r')
```
Now it should be relatively straight-forward to integrate collision checking in the grid environment the robot is going to navigate
```
```
| github_jupyter |
# Lab One - Climatic Averages
## *Analyzing the Global Temperatures Divergence from Average from 1880 - 2018*
In this lab we learn part 1 basics of Python (the programming commands) for data analysis through utilizing the Jupyter environment (this display) to analyze data.
You will learn how to:
- Use Jupyter
- Read in a CSV (comma separated data file) into a data format for analysis
- Implement Simple Flow Structures (for loops, if statements)
- Plotting Basics (line plots, bar charts, and colors)
- Indexing (simple, and boolean indexing)
- Use the following data structures: numpy arrays and Pandas dataframes
- Use hex codes for colors
By the end of this lab you should be able to: read in simple data from CSV, use boolean indexing, and make a line plot / bar chart
Additional materials for reading and reference: Igual & Seguí Chapter 1 Chapter 2: Sections 2.1 through 2.6.2
More on Jupyter here:
http://jupyter-notebook.readthedocs.io/en/stable/notebook.html
## *Part 1 - What is Jupyter?*
Jupyter is a interactive environment (for example this Notebook is in Jupyter) where we can explore how a programming language, i.e. Python, works. I like to think of this as a display format which is mixed text like this box and code in the next box - we will be doing the first lab reports using Jupyter. Note this is not the only way to run Python.
### Running Cells:
- You can "run" various cells at a time by hitting shift-enter OR by hitting run after selecting a cell through the menu. Note you can run blocks of cells (say before or after a certain line through the Cell tab as well)
### Types of Cells:
- You can have cells as programming commands or as text. You can switch between programming language and text through the Cell -> Markdown option.
### Editing Cells:
- To edit a cell - double click in the cell to interact with the program.
### Interactive Exercise 1!
Let's try this out! We have two blank cells below. In the first cell type the following:
print('Python is awesome')
In the next cell type the same words, but this time switch the cell to "Markdown" through the cell menu.
Then run both cells by typing Shift+Enter OR hitting the Run button above.
When you are done please put your nametag down. We will discuss shortly!
```
# This is cell one - type your commands below.
# In Python a '#' is a comment, anything after this
# will not be evaluated as a command.
# This is cell two - type your commands below.
```
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
### Why are we using Jupyter?
There are many ways to use Python. We are starting with Jupyter because (1) it allows us to learn together, (2) by using Jupyter you can enable a lot of self-learning (post class) on your own, and finally (3) your lab reports are much more interesting to complete. You get to turn in great Notebooks that are pre-evaluated. This means if you wrote half your code down but half didn't work, we can still give you partial credit. Wahoo! Becoming a coding expert takes time and effort, we want to reward this effort.
## *Part 2 - What is Python and why are we using it?*
Python is an object-oriented, interpreted language. This means it has 'objects' with methods and attributes which you can access in your programs. The interpreted part means that there is no separate compilation step: the interpreter reads code line by line from beginning to end. This Notebook is running a version of Python called Interactive Python, or IPython. We will see the benefits of using IPython in action in this first lab. We have already seen the print command above and compared markdown cells to programming cells.
### *Part 2. A - Let's start by importing some packages*
```
# Think of packages like enabling different levels of a game.
# The hashtag we use to make a comment, this line can be used so that the computer skips
# reading this code.
import numpy as np
import pandas as pd
# These two packages enable data analysis through various objects (and their methods) and
# data types.
# For example, you can create a numpy array - 1D data array - of numbers as follows:
# This creates an array from 0 up to 10 but not including 10, every second value.
example_array = np.arange(0, 10, 2)
# You can see this as follows with the following print command we saw earlier.
print(example_array)
# Each object in Python has a type, in this case we see that it is a numpy array.
type(example_array)
# We can access different values within an array with indexing, for example:
print(example_array[0]) #print index 0 value
print(example_array[1]) #print index 1 value
# Note that we start with index 0, meaning the final index of an array with length N is N-1
# Types also include integers (non-decimal numbers), floats (decimal numbers), strings (words), and many others...
# For example
first_entry = example_array[0]
print('{} has type {}'.format(first_entry, type(first_entry)))
example_int = 5
print('{} has type {}'.format(example_int, type(example_int)))
# We can also (sometimes) change the type of certain objects.
example_float = float(example_int)
print('{} has type {}'.format(example_float, type(example_float)))
# If you get a type error when you run code, use these type commands to see what is the
# issue; most likely you are trying to do something that cannot be done to a string,
# or an integer etc.
# Note the type of this number is a numpy int64
# as compared to the type of example_int which is JUST int.
# Regular Python integers are flexibly sized while numpy integers are fixed size.
# The fixed size allows for faster computations!
```
### Interactive Exercise 2!
What do you think printing the index of -1 would output? We saw indexing of 0 and 1 before -
in the following cell print the value in the ExampleArray located at -1 index. When done put
your nametags down. While you wait for others to finish, what do you think this
implies about indexing in Python? How could this be advantageous? When might it become a problem?
```
### write your command below
# Negative indices count from the end of the array: -1 is the last element.
print(example_array[-1])
```
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
### *Part 2. B - Now what about that other package we imported - Pandas*
Pandas is a package which we use to enable data set analysis. There are other data structures within Python that we
can use as well, for example numpy arrays, dictionaries, and lists. We will focus right now on pandas dataframes.
### What is a Pandas DataFrame?
A pandas dataframe is a 2D data structure which includes an index and rows and columns of data. These can include ints, floats, strings, etc.
A good way to think of a pandas dataframe is an excel spreadsheet which we analyze and interpret with Python. Enough about all this, let's get our hands on some data.
```
# Within the folder you all downloaded is a data subdirectory with global temperature
# anomaly data in the following comma separated values (CSV) file.
# This data is from the following website:
# https://www.ncdc.noaa.gov/cag/time-series/global/globe/land/ytd/12/1880-2018
# Provided by NOAA National Center for Environmental Information
# Global temperature anomaly data come from the Global Historical Climatology
# Network-Monthly (GHCN-M) data set and International Comprehensive Ocean-Atmosphere
# Data Set (ICOADS), which have data from 1880 to the present.
# These data are the timeseries of global-scale temperature anomalies calculated with respect to
# the 20th century average.
# The following command reads from a csv format into a pandas dataframe and assigns it to our variable
# that we named temp_var_global
# Note the header=4 argument; this designates four skipped lines before assigned variables.
temp_var_global = pd.read_csv('./Data/global_land_ocean_1880_2018_temp_variants.csv', header=4)
# Let's see if this was read in correctly - you should ALWAYS do this after reading in data.
# Check the type - it should be a dataframe.
print(type(temp_var_global))
# Check the first 10 rows
temp_var_global.head(n=10)
# Here you can see on the left hand side the index values, followed by the year, then Value.
# This data did NOT provide a nice column name for the rows in the CSV file so we have the "Value".
# Given the information above and at the website we know that this is the temperature anomaly
# for Earth for each year in Celsius.
# You can also check the column values as follows:
print(temp_var_global.columns)
# Let's rename the column so it's not a vague "Value".
temp_var_global.rename(columns={'Value': 'AnomalyC'}, inplace=True)
#inplace = True is to prevent redefining a NEW dataframe object
# What would happen if we did not use inplace=True ?
# Let's check the columns
print(temp_var_global)
# How about for fun, let's check the 50'th entry?
print(temp_var_global.loc[50])
# What about the entire row?
print(temp_var_global.loc[50, :])
```
### Put your nametag down when you have reached this point. Can you guess what : does within this example?
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
### *Part 2. C - Adding Values into a Dataframe*
### Our temperatures are in Celsius, maybe it would make sense to create a column in Farenheit?
Formula is: $\Delta\mathrm{T}(F) = \Delta\mathrm{T}(C) \times 1.8$ ; for a temperature difference we, skip adding $32$.
**Note: Explore the Markdown version of this cell by double-clicking this line.**
*This is how we write equations using LaTeX in a Jupyter Notebook.* See https://en.wikibooks.org/wiki/LaTeX/Mathematics for more examples of how to use LaTeX.
```
# Here we define a NEW column based off the old column.
# Temperature *differences* convert as F = C * 1.8 (no +32 offset needed).
temp_var_global['AnomalyF'] = temp_var_global['AnomalyC'] * 1.8
# Let's make sure this did what we wanted.
print(temp_var_global.head(n=10))
# Notice that the dataframe is ordered by year.
# What if we instead wanted to order it by temperature anomaly?
# Pandas Dataframes have a method for sorting.
temp_var_global_sorted = temp_var_global.sort_values("AnomalyC", ignore_index=True)
temp_var_global_sorted.head(10)
# To find all available methods for any object, one can use
# tab completion on the dot operator...
# Type "temp_var_global." below, and then press tab
# with your cursor next to the dot. (it may take a few seconds to load)
# NOTE(review): the next line is intentionally left incomplete for the
# tab-completion exercise; it is a SyntaxError if executed as-is.
temp_var_global.
# To find out what each method or attribute is, type the command followed by "?"
# and run the cell. (The trailing "?" help syntax is IPython/Jupyter-only.)
temp_var_global.sort_values?
# You can also find most documentation for Python and various packages online.
# E.g. https://pandas.pydata.org/pandas-docs/stable/
```
### We will pause here to discuss what questions have on Pandas or Numpy.
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
## *Part 3. Basic Plotting*
```
# Step one, we import a plotting package.
import matplotlib.pyplot as plt
# To enable our visualization within within the notebook,
# we use the following command
%matplotlib inline
# Define a figure (think of this as a page).
fig = plt.figure(figsize=(10, 5))
# Give it a title. FIX: the data span 1880 - 2018, not 1800 - 2018.
fig.suptitle('Temperature Variants From 1880 - 2018 Global Averages', fontsize=20)
# Lets just make a line plot, worry about everything else later
plt.plot(temp_var_global['Year'], temp_var_global['AnomalyF'])
# And label some axes. Raw string: keeps the LaTeX \degree backslash from
# being read as a (deprecated) Python string escape.
plt.ylabel(r'Anomaly $\degree$F', fontsize = 20)
plt.xlabel('Year', fontsize = 20);
# Notice the ";" at the end of the last line. The semicolon suppresses output. Try running this
# cell after removing the semicolon. What does this imply about what the plt.xlabel() method returns?
# What do you think the plt.plot() method returns? Why might this be useful?
# NOTE: using $COMMANDS$ in a text entry will enable mathematical symbols through LaTeX
```
### We will pause here to discuss our opinions on this plot.
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
```
fig = plt.figure(figsize=(10, 5))
fig.suptitle('Temperature Variants From 1880 - 2018 Global Averages', fontsize=20)
plt.plot(temp_var_global['Year'], temp_var_global['AnomalyF'])
plt.ylabel('Anomaly $\degree$F', fontsize = 20)
plt.xlabel('Year', fontsize = 20)
#----------------------------------------- we add the following to our code
plt.xticks(fontsize=16) #make the xaxis labels larger
plt.yticks(fontsize=16) #make the yaxis labels larger
plt.axhline(y=0.0, color='k', linestyle='--') #add a horizontal line at 0 (the 20th-century average)
plt.grid(color='gray', linestyle='dashed'); #add a grid so it's easier to tell if a bar sits at zero
#-----------------------------------------
```
### What we lose here in terms of the information presented is that we KNOW each point represents a definite width of 1 year — it is an average over that year — so the chaotic behavior of the line is more misleading than informative. A bar chart would fix this issue. Go ahead and try out the next plot block.
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
```
fig = plt.figure(figsize=(10, 5))
fig.suptitle('Temperature Variants From 1880 - 2018 Global Averages', fontsize=20)
#----------------------------------------- we have edited this into a bar chart
# Note we make it purple for fun ;)
# also the align = 'edge' will align each bar to the left side of its year range
# width = 0.8 rather than 1 simply to make it appear more interesting. Go ahead and play
# with the width to see why it's at 0.8
plt.bar(temp_var_global['Year'], temp_var_global['AnomalyF'], width = 0.8, align='edge',
        color = 'purple')
#-----------------------------------------
plt.ylabel('Anomaly $\degree$F', fontsize = 20)
plt.xlabel('Year', fontsize = 20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.axhline(y=0.0, color='k', linestyle='--')
plt.grid(color='gray', linestyle='dashed')
```
## *Part 4 - More advanced plotting*
So while purple was fun, that's not really the main message of this chart. We want to show when it's greater than zero, and less than zero. It would be best if these were actually different colors.
Remember how we set this equal to purple? We can also assign a column JUST for colors in our dataframe so that each bar could have different colors when we plot it. There are two more obvious ways to go about this. In both ways our goal is to create a NEW column in the data frame with the colors of the bars where if less than zero we make blue ('b'), greater than we make red ('r').
#### Come up with an idea of how you would do this. Do not look below to gain inspiration.
#### Then discuss your ideas with your neighbor. Be ready to share with the class.
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
#### The two ways we are going to learn in this laboratory are as follows.
#### Way 1)
- The tried and true brute force method of using a for loop to loop over every data value in our array, create a new column, and fill with what color we want it to be.
#### Way 2)
- Use the built-in methods of objects (in this case pandas.DataFrame) to avoid writing extensive loop structures
Let's start with way one.
### *Part 4. A. METHOD ONE - For Loops.*
```
# Set up a new column just for colors, filled with 'g' (green) for now --
# any green bars left in the final plot mean a row was missed below.
temp_var_global['Colors'] = 'g'
# Now we want to "loop over" our data frame, setting each value in the frame in the column of
#'Colors' to a set value.
# What do I mean by a for loop? Here's an example, remember our numpy array from earlier?
for i in range(len(example_array)):
    print("The value of example_array at index {1} is {0}.".format(example_array[i], i))
# What if we want to count each entry? There's a function called enumerate for numpy arrays
# note it starts at zero.
# The following produces the same as the previous for loop.
# this will loop over the pair index AND entry
for index, entry in enumerate(example_array):
    print("The value of the array at index {} is {}.".format(index, entry))
```
### Now let's apply what we learned from the for loops to the dataframe.
```
# This one is a bit more complicated...but it's the same basic idea where enumerate for
# numpy arrays is replaced with iterrows() for pandas dataframes, which is exactly what
# it sounds like: iterate over rows :)
for index, row in temp_var_global.iterrows():
    # iterates over the entirety of the dataframe
    print("At index {} and year {} the value of AnomalyF is: {}".format(index, row['Year'],
    row['AnomalyF']))
# Now let's actually assign colors; we can use if statements here, another flow control structure.
# In words what this loop means: for every entry in our dataframe, see if > 0 or < 0.
for index, row in temp_var_global.iterrows():
    #check if greater than 0
    if row['AnomalyF'] > 0:
        #set value in array as red
        temp_var_global.at[index, 'Colors'] = 'red'
    #check if less than 0, or equal to
    if row['AnomalyF'] <= 0:
        #set value in array as blue
        temp_var_global.at[index, 'Colors'] = 'blue'
# Note: you can find more pre-defined colors in matplotlib here:
# https://matplotlib.org/gallery/color/named_colors.html#sphx-glr-gallery-color-named-colors-py
```
### PAUSE. Think about what is happening in this loop. Explain it to your neighbor. When you are done chatting put your name tag down.
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
```
# let's check what this looks like: print rows 50 - 70 of both the
# Colors and AnomalyF columns
print(temp_var_global.loc[50:70, ['Colors', 'AnomalyF']])
# and now let's plot it!
fig = plt.figure(figsize=(10, 5))
# FIX: the title said 1800 - 2017; the data actually span 1880 - 2018.
fig.suptitle('Temperature Variants From 1880 - 2018 Global Averages', fontsize=20)
#------------------------------------------------ we edited the following color statement ONLY
plt.bar(temp_var_global['Year'], temp_var_global['AnomalyF'], width = 0.8, align='edge',
        color = temp_var_global['Colors'])
#------------------------------------------------
# raw string so the LaTeX \degree escape is not treated as a string escape
plt.ylabel(r'Anomaly $\degree$F', fontsize = 20)
plt.xlabel('Year', fontsize = 20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.axhline(y=0.0, color='k', linestyle='--')
plt.grid(color='gray', linestyle='dashed')
```
### *Part 4. B. METHOD TWO - Boolean Indexing.*
In general python "slows down" with loops. It also allows for better programming LATER (in class) if we use what we
call 'Boolean Indexing'.
We use those logic statements before for greater or lesser and use them to select positions in the dataframe to then subset into our data. For example, all places with >0, all places with <0
```
# Step One: create a boolean array (an "index") marking positive anomalies
boolean_index = temp_var_global['AnomalyF'] > 0
print(temp_var_global.loc[0:10, 'AnomalyF'])
print(boolean_index[0:10])
#let's see what these look like!
# The tilde operator (~) flips the boolean index to the opposite truth value
print(~boolean_index[0:10])
# Now, rather than the for loop we can set up a different system:
# assign both colors with two vectorized .loc writes
temp_var_global['Colors2'] = 'k'
temp_var_global.loc[boolean_index, 'Colors2'] = '#9F4E58'
temp_var_global.loc[~boolean_index, 'Colors2'] = '#64ACEA'
# To customize our colors we use hex codes - check out this website.
#http://www.color-hex.com/color/90537c
print(temp_var_global.loc[0:10, ['Colors', 'Colors2']])
fig = plt.figure(figsize=(10, 5))
# FIX: the title said 1800 - 2017; the data actually span 1880 - 2018.
fig.suptitle('Temperature Variants From 1880 - 2018 Global Averages', fontsize=20)
#-------- we edited the following color statement ONLY
plt.bar(temp_var_global['Year'], temp_var_global['AnomalyF'], width = 0.8, align='edge',
        color = temp_var_global['Colors2'])
#----------
# raw string so the LaTeX \degree escape is not treated as a string escape
plt.ylabel(r'Anomaly $\degree$F', fontsize = 20)
plt.xlabel('Year', fontsize = 20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.axhline(y=0.0, color='k', linestyle='--')
plt.grid(color='gray', linestyle='dashed')
# how to save figures -
# the first call keeps a transparent background, the second saves with the
# default white background
plt.savefig('./Figures/TempVariants_GlobalYearlyAverages_Transparent.png', transparent=True)
plt.savefig('./Figures/TempVariants_GlobalYearlyAverages.png')
# please go see the .png files within your Jupyter folder
```
### How did the methods we learn differ and compare to each other? The for loop vs the boolean index method? Specifically, did you like one more than the other?
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
# SUMMARY
From this lab you have learned the basics of python packages including: numpy, pandas, and matplotlib.pyplot.
We have also learned about flow control structures (for loops, if statements) and how to access a pandas data frame through boolean indexing and through normal indexing and how to manipulate various columns here.
Next time we will continue our learning of Python.
Homework: Please complete Assignment 1 located on Canvas - due in 1 week on Wednesday, January 22. Office hours will be held on Friday from 1-3pm in CSRB 2218 (two days from now). If this time doesn't work please email us and we can arrange separate hours.
<br>
<br>
<br>
<br>
<br>
### Additional Material - Error propagation and reporting
Suppose many people took measurements of the length and width of the CSRB and found:
Length = $112.1 \pm 0.4 \mathrm{m}$
Width = $55.5 \pm 0.5 \mathrm{m}$
How would you report the area, including uncertainty, of the footprint of the building?
We know that Area = Length x Width $\pm$ $\delta A$, where
$$\delta A = \sqrt{\left(\frac{\partial A}{\partial w} \delta w \right)^2 + \left(\frac{\partial A}{\partial l} \delta l \right)^2 }$$
This is equivalent to:
$$ \frac{\delta A}{A} = \sqrt{\left(\frac{\delta w}{w} \right)^2 + \left(\frac{\delta l}{l} \right)^2}$$
Let's see how this looks in code!
```
# Measured values for the building footprint (meters).
length = 112.1
width = 55.5
dl = 0.4   # uncertainty in the length
dw = 0.5   # uncertainty in the width

# Area of the footprint.
area = length * width

# Python's built-in math module provides sqrt for the error propagation.
from math import sqrt

# Equation 1: absolute uncertainty, dA = sqrt((l*dw)^2 + (w*dl)^2).
# Note: in Python the exponent operator is **.
dA = sqrt((length * dw) ** 2 + (width * dl) ** 2)

# Equation 2: relative uncertainty, dA/A = sqrt((dw/w)^2 + (dl/l)^2) ...
dA_over_A = sqrt((dw / width) ** 2 + (dl / length) ** 2)
# ... multiplied by the calculated area to recover the absolute uncertainty.
dA_2 = dA_over_A * area

# Are the two equations above equal?
dA == dA_2 # == is a boolean operator that tests for equality, can also use != for not equal
```
We are now ready to report our calculation and uncertainty.
```
# Report the measurement; the original "+\-" printed a literal backslash
# (and '\-' is an invalid escape sequence) — the intended separator is "+/-".
print("Area = {} +/- {}".format(area, dA))
```
**Don't forget to use significant figures and units!**
```
print("Area = {0:0.1f} +/- {1:0.1f} m^2".format(area, dA))
```
You can find more about formatting strings in Python in the documentation.
See Format Specification Mini-Language on the webpage:
https://docs.python.org/3.7/library/string.html
We can also use a markdown cell to explicitly type in our answers, and use $\LaTeX$ to make it look presentable.
**Here is the preferred way to report our calculation**:
$$ A = 6221.5 \pm 60.3 \mathrm{m}^2 $$
For help on Markdown and LaTeX syntax, see:
https://www.markdownguide.org/cheat-sheet/
https://en.wikibooks.org/wiki/LaTeX/Mathematics
| github_jupyter |
# AutoGluon Tabular with SageMaker
[AutoGluon](https://github.com/awslabs/autogluon) automates machine learning tasks enabling you to easily achieve strong predictive performance in your applications. With just a few lines of code, you can train and deploy high-accuracy deep learning models on tabular, image, and text data.
This notebook shows how to use AutoGluon-Tabular with Amazon SageMaker by creating custom containers.
## Prerequisites
If using a SageMaker hosted notebook, select kernel `conda_mxnet_p36`.
```
# Make sure docker compose is set up properly for local mode
!./setup.sh

import os
import boto3
import sagemaker
from time import sleep
from collections import Counter
import numpy as np
import pandas as pd
from sagemaker import get_execution_role, local, Model, utils, s3
from sagemaker.estimator import Estimator
from sagemaker.predictor import Predictor
from sagemaker.serializers import CSVSerializer
from sagemaker.deserializers import StringDeserializer
from sklearn.metrics import accuracy_score, classification_report
# NOTE(review): IPython.core.display is deprecated in recent IPython releases;
# `from IPython.display import display, HTML` is the supported path — confirm
# the installed IPython version before changing.
from IPython.core.display import display, HTML
from IPython.core.interactiveshell import InteractiveShell

# Print settings: show every expression result in a cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 10)

# Account/s3 setup
session = sagemaker.Session()
local_session = local.LocalSession()
bucket = session.default_bucket()
prefix = 'sagemaker/autogluon-tabular'
region = session.boto_region_name
role = get_execution_role()
# Resolve the AWS account id via STS, using the regional STS endpoint.
client = session.boto_session.client(
    "sts", region_name=region, endpoint_url=utils.sts_regional_endpoint(region)
)
account = client.get_caller_identity()['Account']
# Look up the MXNet 1.7.0 framework images the custom containers build on.
registry_uri_training = sagemaker.image_uris.retrieve('mxnet', region, version= '1.7.0', py_version='py3', instance_type='ml.m5.2xlarge', image_scope='training')
registry_uri_inference = sagemaker.image_uris.retrieve('mxnet', region, version= '1.7.0', py_version='py3', instance_type='ml.m5.2xlarge', image_scope='inference')
# ECR registry prefix for this account in the same domain as the framework image.
ecr_uri_prefix = account +'.'+'.'.join(registry_uri_training.split('/')[0].split('.')[1:])
```
### Build docker images
Build the training/inference image and push to ECR
```
training_algorithm_name = 'autogluon-sagemaker-training'
inference_algorithm_name = 'autogluon-sagemaker-inference'
!/bin/bash ./container-training/build_push_training.sh {account} {region} {training_algorithm_name} {ecr_uri_prefix} {registry_uri_training.split('/')[0].split('.')[0]} {registry_uri_training}
!/bin/bash ./container-inference/build_push_inference.sh {account} {region} {inference_algorithm_name} {ecr_uri_prefix} {registry_uri_training.split('/')[0].split('.')[0]} {registry_uri_inference}
```
### Get the data
In this example we'll use the direct-marketing dataset to build a binary classification model that predicts whether customers will accept or decline a marketing offer.
First we'll download the data and split it into train and test sets. AutoGluon does not require a separate validation set (it uses bagged k-fold cross-validation).
```
# Download the direct-marketing sample dataset from the public SageMaker
# bucket and unzip it locally.
!aws s3 cp --region {region} s3://sagemaker-sample-data-{region}/autopilot/direct_marketing/bank-additional.zip .
!unzip -qq -o bank-additional.zip
!rm bank-additional.zip

local_data_path = './bank-additional/bank-additional-full.csv'
data = pd.read_csv(local_data_path)

# Split train/test data (70/30); the fixed seed keeps the split reproducible.
train = data.sample(frac=0.7, random_state=42)
test = data.drop(train.index)

# Split test X/y: keep the label separately for evaluation, drop it from the features.
label = 'y'
y_test = test[label]
X_test = test.drop(columns=[label])
```
##### Check the data
```
train.head(3)
train.shape
test.head(3)
test.shape
X_test.head(3)
X_test.shape
```
Upload the data to s3
```
train_file = 'train.csv'
train.to_csv(train_file,index=False)
train_s3_path = session.upload_data(train_file, key_prefix='{}/data'.format(prefix))
test_file = 'test.csv'
test.to_csv(test_file,index=False)
test_s3_path = session.upload_data(test_file, key_prefix='{}/data'.format(prefix))
X_test_file = 'X_test.csv'
X_test.to_csv(X_test_file,index=False)
X_test_s3_path = session.upload_data(X_test_file, key_prefix='{}/data'.format(prefix))
```
## Hyperparameter Selection
The minimum required settings for training is just a target label, `init_args['label']`.
Additional optional hyperparameters can be passed to the `autogluon.tabular.TabularPredictor.fit` function via `fit_args`.
Below shows a more in depth example of AutoGluon-Tabular hyperparameters from the example [Predicting Columns in a Table - In Depth](https://auto.gluon.ai/stable/tutorials/tabular_prediction/tabular-indepth.html). Please see [fit parameters](https://auto.gluon.ai/stable/_modules/autogluon/tabular/predictor/predictor.html#TabularPredictor) for further information. Note that in order for hyperparameter ranges to work in SageMaker, values passed to the `fit_args['hyperparameters']` must be represented as strings.
```python
# Neural-network search space (values must be strings to survive the
# SageMaker hyperparameter round-trip).
nn_options = {
    'num_epochs': "10",
    'learning_rate': "ag.space.Real(1e-4, 1e-2, default=5e-4, log=True)",
    'activation': "ag.space.Categorical('relu', 'softrelu', 'tanh')",
    'layers': "ag.space.Categorical([100],[1000],[200,100],[300,200,100])",
    'dropout_prob': "ag.space.Real(0.0, 0.5, default=0.1)"
}

# LightGBM search space.
gbm_options = {
    'num_boost_round': "100",
    'num_leaves': "ag.space.Int(lower=26, upper=66, default=36)"
}

model_hps = {'NN': nn_options, 'GBM': gbm_options}

# Arguments for the TabularPredictor constructor.
# (Fixed: the original example was missing the comma after 'roc_auc',
# which is a syntax error.)
init_args = {
    'eval_metric': 'roc_auc',
    'label': 'y'
}

# Arguments forwarded to TabularPredictor.fit().
fit_args = {
    'presets': ['best_quality', 'optimize_for_deployment'],
    'time_limits': 60*10,
    'hyperparameters': model_hps,
    'hyperparameter_tune': True,
    'search_strategy': 'skopt'
}

# Top-level hyperparameters handed to the SageMaker estimator.
hyperparameters = {
    'fit_args': fit_args,
    'feature_importance': True
}
```
**Note:** Your hyperparameter choices may affect the size of the model package, which could result in additional time taken to upload your model and complete training. Including `'optimize_for_deployment'` in the list of `fit_args['presets']` is recommended to greatly reduce upload times.
<br>
```
# Required: the target column the model must predict.
init_args = dict(label='y')

# Parameters forwarded to fit(); add 'best_quality' to the presets list for
# better performance at the cost of a longer runtime.
fit_args = dict(
    label='y',
    presets=['optimize_for_deployment'],
)

# Hyperparameters handed to the SageMaker estimator.
# init_args could also be passed here (hyperparameters['init_args'] = init_args).
hyperparameters = dict(
    fit_args=fit_args,
    feature_importance=True,
)

# Tag the training job so it is easy to identify in the console.
tags = [dict(Key='AlgorithmName', Value='AutoGluon-Tabular')]
```
## Train
For local training set `train_instance_type` to `local` .
For non-local training the recommended instance type is `ml.m5.2xlarge`.
**Note:** Depending on how many underlying models are trained, `train_volume_size` may need to be increased so that they all fit on disk.
```
%%time

# Uncomment 'local' to run the training container on this notebook instance
# for fast debugging instead of launching a managed training job.
instance_type = 'ml.m5.2xlarge'
#instance_type = 'local'

# Custom training image pushed to ECR earlier in this notebook.
ecr_image = f'{ecr_uri_prefix}/{training_algorithm_name}:latest'

estimator = Estimator(image_uri=ecr_image,
                      role=role,
                      instance_count=1,
                      instance_type=instance_type,
                      hyperparameters=hyperparameters,
                      volume_size=100,
                      tags=tags)

# Set inputs. Test data is optional, but requires a label column.
inputs = {'training': train_s3_path, 'testing': test_s3_path}
estimator.fit(inputs)
```
### Review the performance of the trained model
```
from utils.ag_utils import launch_viewer
launch_viewer(is_debug=False)
```
### Create Model
```
# Predictor that serializes requests as CSV and returns the raw response string.
class AutoGluonTabularPredictor(Predictor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args,
                         serializer=CSVSerializer(),
                         deserializer=StringDeserializer(), **kwargs)

# Custom inference image pushed to ECR earlier in this notebook.
ecr_image = f'{ecr_uri_prefix}/{inference_algorithm_name}:latest'

if instance_type == 'local':
    # Local mode: build the model directly from the estimator.
    model = estimator.create_model(image_uri=ecr_image, role=role)
else:
    # Remote mode: wrap the trained artifact in a Model bound to our
    # inference image and custom predictor class.
    #model_uri = os.path.join(estimator.output_path, estimator._current_job_name, "output", "model.tar.gz")
    model_uri = estimator.model_data
    model = Model(ecr_image, model_data=model_uri, role=role, sagemaker_session=session, predictor_cls=AutoGluonTabularPredictor)
```
### Batch Transform
For local mode, either `s3://<bucket>/<prefix>/output/` or `file:///<absolute_local_path>` can be used as outputs.
By including the label column in the test data, you can also evaluate prediction performance (In this case, passing `test_s3_path` instead of `X_test_s3_path`).
```
# Output location for batch-transform results (S3, or file:// in local mode).
output_path = f's3://{bucket}/{prefix}/output/'
# output_path = f'file://{os.getcwd()}'

transformer = model.transformer(instance_count=1,
                                instance_type=instance_type,
                                strategy='MultiRecord',       # batch several records per request
                                max_payload=6,                # max request payload (MB, per SageMaker docs)
                                max_concurrent_transforms=1,
                                output_path=output_path)

# CSV input, one record per line; passing the labeled test set lets the
# container also report evaluation metrics.
transformer.transform(test_s3_path, content_type='text/csv', split_type='Line')
transformer.wait()
```
### Endpoint
##### Deploy remote or local endpoint
```
instance_type = 'ml.m5.2xlarge'
#instance_type = 'local'
predictor = model.deploy(initial_instance_count=1,
instance_type=instance_type)
```
##### Attach to endpoint (or reattach if kernel was restarted)
```
# Select standard or local session based on instance_type
if instance_type == 'local':
    sess = local_session
else:
    sess = session

# Attach to the endpoint by name. The SageMaker Python SDK v2 (which this
# notebook already uses, e.g. sagemaker.serializers) exposes the name as
# `endpoint_name`; the old `.endpoint` attribute is deprecated.
predictor = AutoGluonTabularPredictor(predictor.endpoint_name, sagemaker_session=sess)
```
##### Predict on unlabeled test data
```
results = predictor.predict(X_test.to_csv(index=False)).splitlines()
# Check output
print(Counter(results))
```
##### Predict on data that includes label column
Prediction performance metrics will be printed to endpoint logs.
```
results = predictor.predict(test.to_csv(index=False)).splitlines()
# Check output
print(Counter(results))
```
##### Check that classification performance metrics match evaluation printed to endpoint logs as expected
```
y_results = np.array(results)
print("accuracy: {}".format(accuracy_score(y_true=y_test, y_pred=y_results)))
print(classification_report(y_true=y_test, y_pred=y_results, digits=6))
```
##### Clean up endpoint
```
predictor.delete_endpoint()
```
| github_jupyter |
```
import plaidml.keras
plaidml.keras.install_backend()
import os
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
# Importing useful libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, GRU, Bidirectional, Conv1D, Flatten, MaxPooling1D
from keras.optimizers import SGD
import math
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from keras import optimizers
import time
# from google.colab import files
# uploaded = files.upload()
df = pd.read_csv('../data/num_data.csv')
# import io
# df = pd.read_csv(io.BytesIO(uploaded['num_data.csv']))
def return_rmse(test, predicted):
    """Root-mean-square error between two equally shaped arrays/sequences.

    Computed directly with numpy — equivalent to
    sqrt(sklearn.metrics.mean_squared_error(test, predicted)) with the default
    uniform averaging — so the helper has no per-call sklearn dependency.
    Returns a plain Python float.
    """
    diff = np.asarray(test, dtype=float) - np.asarray(predicted, dtype=float)
    return float(np.sqrt(np.mean(np.square(diff))))
```
### Data Processing
```
# Reload the full dataset (also loaded in the imports cell above).
df = pd.read_csv('../data/num_data.csv')

# Column groups of interest (defined for reference; not used in the visible cells).
POLLUTION = ['PM2.5', 'PM10', 'SO2', 'NO2', 'CO', 'O3']
WEATHER = ['PM2.5', 'TEMP', 'PRES', 'DEWP', 'RAIN', 'wd', 'WSPM']

dataset = df
dataset.shape

# Chronological 60/20/20 split into train / validation / test rows.
data_size = dataset.shape[0]
train_size=int(data_size * 0.6)
test_size = int(data_size * 0.2)
valid_size = data_size - train_size - test_size

# Feature matrix: columns 4..15 (12 features per time step).
training_set = dataset[:train_size].iloc[:,4:16].values
valid_set = dataset[train_size:train_size+valid_size].iloc[:,4:16].values
test_set = dataset[data_size-test_size:].iloc[:,4:16].values

# Target: the first column, reshaped to a column vector for the scaler.
y = dataset.iloc[:,0].values
y = y.reshape(-1,1)
n_feature = training_set.shape[1]
y.shape

# Scaling the dataset to [0, 1].
sc = MinMaxScaler(feature_range=(0,1))
training_set_scaled = sc.fit_transform(training_set)
# NOTE(review): fit_transform re-fits the scaler on the validation and test
# sets, so each split is scaled with different min/max statistics (and test
# statistics leak into evaluation). sc.transform(...) with the train-fitted
# scaler is the usual intent — confirm before changing reported results.
valid_set_scaled = sc.fit_transform(valid_set)
test_set_scaled = sc.fit_transform(test_set)

sc_y = MinMaxScaler(feature_range=(0,1))
y_scaled = sc_y.fit_transform(y)
def convert_to_sequences(sequences, n_steps_in, n_steps_out):
    """Slice a 2-D array into overlapping (input window, target window) pairs.

    Each sample pairs `n_steps_in` consecutive rows of all features with the
    following `n_steps_out` values of column 0. The target window begins on
    the last input step, so input and target overlap by one time step
    (matching the original implementation).
    """
    inputs, targets = [], []
    total = len(sequences)
    for start in range(total):
        in_end = start + n_steps_in
        out_end = in_end + n_steps_out - 1
        if out_end > total:
            break
        inputs.append(sequences[start:in_end, :])
        targets.append(sequences[in_end - 1:out_end, 0])
    return np.array(inputs), np.array(targets)
# Input and output sequence lengths for the sliding windows
# (12 steps of history in, 12 steps predicted out).
n_steps_in = 12
n_steps_out = 12

X_train, y_train = convert_to_sequences(training_set_scaled, n_steps_in, n_steps_out)
X_valid, y_valid = convert_to_sequences(valid_set_scaled, n_steps_in, n_steps_out)
X_test, y_test = convert_to_sequences(test_set_scaled, n_steps_in, n_steps_out)
```
## Grid Search Control
```
# Candidate hyperparameter values; the scalars below are the defaults used
# wherever the grid loop does not override them (activation, epochs, neurons).
n_activation = ['tanh', 'sigmoid', 'relu']
act = n_activation[0]
n_learn_rate = [0.01, 0.001, 0.0001]
lr = n_learn_rate[0]
n_optimizers = [optimizers.Adam(lr=lr), optimizers.RMSprop(lr=lr), optimizers.SGD(lr=lr)]
opt = n_optimizers[0]
n_epoches = [50]
epoch = n_epoches[0]
n_batch_size = [1024, 2048, 5096, 10192]
batch = n_batch_size[-1]
n_of_neurons = [10, 50, 200]
neuron = n_of_neurons[1]

# Results table. Columns now match the per-run rows built below (the original
# initialized different column names — 'epoch', 'batch', 'neuron' — which made
# pd.concat produce a misaligned, NaN-padded frame).
rmse_df = pd.DataFrame(columns=['Model', 'train_rmse', 'valid_rmse', 'test_rmse', 'train_time', 'Batch Size',
                                'Learning Rate', 'Loss Function'])

# Grid search over batch size x learning rate x loss function.
for batch in n_batch_size:
    for lr in n_learn_rate:
        n_optimizers = [optimizers.Adam(lr=lr)]
        for opt in n_optimizers:
            for loss_function in ['mean_squared_error', 'mean_absolute_error']:
                # Conv1D front-end feeding an LSTM, then a dense output layer.
                DFS = Sequential()
                DFS.add(Conv1D(filters=64, kernel_size=6, activation='tanh', input_shape=(X_train.shape[1],n_feature)))
                DFS.add(MaxPooling1D(pool_size=4))
                DFS.add(Dropout(0.2))
                DFS.add(LSTM(units=neuron, return_sequences=False, input_shape=(X_train.shape[1],n_feature), activation=act))
                DFS.add(Dropout(0.190 + 0.0025 * n_steps_in))
                DFS.add(Dense(units=n_steps_out))
                # Use the loss from the grid. The original hard-coded
                # 'mean_squared_error' here, so the loss_function loop had no
                # effect on training even though it was recorded in the results.
                DFS.compile(optimizer=opt, loss=loss_function)

                regressor = DFS
                model = 'DFS'
                print('training start for', model)

                # Wall-of-CPU training time for this configuration.
                start = time.process_time()
                regressor.fit(X_train,y_train,epochs=epoch,batch_size=batch)
                train_time = round(time.process_time() - start, 2)

                print('results for training set')
                y_train_pred = regressor.predict(X_train)
                train_rmse = return_rmse(y_train,y_train_pred)

                print('results for valid set')
                y_valid_pred = regressor.predict(X_valid)
                valid_rmse = return_rmse(y_valid,y_valid_pred)

                print('results for test set')
                y_test_pred = regressor.predict(X_test)
                test_rmse = return_rmse(y_test,y_test_pred)

                # One result row per configuration, appended to the table.
                one_df = pd.DataFrame([[model, train_rmse, valid_rmse, test_rmse, train_time, batch, lr, loss_function ]],
                                      columns=['Model', 'train_rmse', 'valid_rmse', 'test_rmse', 'train_time', 'Batch Size',
                                               'Learning Rate', 'Loss Function'])
                rmse_df = pd.concat([rmse_df, one_df])

# save the rmse results
rmse_df.to_csv('../dfs_grid_search_part3.csv')
```
| github_jupyter |
# Q-learning applied to FrozenLake
#### **Remember**: Q-learning is a model free, off-policy algorithm that can be used to find an optimal action using a Q function. Q can be represented as a table that contains a value for each pair state-action
To review Q-learning watch [Q learning explained by Siraj](https://www.youtube.com/watch?v=aCEvtRtNO-M)
#### Q-learning pipeline is quite easy an can be summarised in 5 blocks:

## WHAT'S THE ENVIRONMENT?
#### We'll apply Q-learning on a [Gym](http://gym.openai.com/) game called [FrozenLake](https://gym.openai.com/envs/FrozenLake-v0/)

## LET'S START TO CODE
```
import gym
import random
from collections import namedtuple
import collections
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
### BASIC FUNCTION TO CHOOSE AN ACTION FOLLOWING DIFFERENT POLICIES
```
def select_eps_greedy_action(table, obs, n_actions):
    """ε-greedy policy: with probability ε (the notebook-global `epsilon`)
    pick a uniformly random action, otherwise the greedy one from the Q-table.
    """
    _, greedy_action = best_action_value(table, obs)
    explore = random.random() < epsilon
    return random.randint(0, n_actions - 1) if explore else greedy_action
def select_greedy_action(table, obs, n_actions):
    """Greedy policy: always return the highest-valued action for `obs`.

    `n_actions` is accepted only for signature parity with the ε-greedy
    selector; the lookup itself is delegated to best_action_value.
    """
    return best_action_value(table, obs)[1]
def best_action_value(table, state, num_actions=None):
    """Return (max Q-value, argmax action) for `state` over all actions.

    Parameters
    ----------
    table : mapping of (state, action) -> Q-value (a defaultdict(float) here)
    state : the state to evaluate
    num_actions : number of actions to scan; defaults to the notebook-global
        `n_actions`, so existing two-argument callers keep working.

    The running maximum starts at -inf: the original started at 0, which
    silently returned action 0 with value 0 whenever every Q-value was
    negative (harmless for FrozenLake's non-negative rewards, but wrong in
    general).
    """
    if num_actions is None:
        num_actions = n_actions
    best_action = 0
    max_value = float('-inf')
    for action in range(num_actions):
        if table[(state, action)] > max_value:
            best_action = action
            max_value = table[(state, action)]
    return max_value, best_action
```

```
def Q_learning(table, obs0, obs1, reward, action):
    """One tabular Q-learning update:

        Q(obs0, a) += α * (reward + γ * max_a' Q(obs1, a') - Q(obs0, a))

    using the notebook-global GAMMA (γ) and LEARNING_RATE (α).
    """
    # Best value reachable from the successor state obs1.
    next_best, _ = best_action_value(table, obs1)
    # Temporal-difference error between the target and the current estimate.
    old_value = table[(obs0, action)]
    td_error = (reward + GAMMA * next_best) - old_value
    # Move the estimate a step of size LEARNING_RATE towards the target.
    table[(obs0, action)] = old_value + LEARNING_RATE * td_error
```
### TEST THE POLICY
```
def test_game(env, table):
    '''
    Play TEST_EPISODES complete games with the purely greedy policy (no
    exploration) and return the mean total reward per game.
    NB: uses the classic gym API where env.step returns a 4-tuple and
    env.reset returns only the observation.
    '''
    n_actions = env.action_space.n
    reward_games = []
    for _ in range(TEST_EPISODES):
        obs = env.reset()
        rewards = 0
        while True:
            # Act greedily: always take the best action from the Q-table
            next_obs, reward, done, _ = env.step(select_greedy_action(table, obs, n_actions))
            obs = next_obs
            rewards += reward
            if done:
                # Episode over: record the total reward for this game
                reward_games.append(rewards)
                break
    return np.mean(reward_games)
```
### MAIN PROCEDURE
```
# Some hyperparameters..
GAMMA = 0.95            # discount factor for future rewards
# NB: the decay rate regulates the Exploration - Exploitation trade-off:
# start with an EPSILON of 1 and decay towards 0
EPS_DECAY_RATE = 0.9993
LEARNING_RATE = 0.8
# .. and constants
TEST_EPISODES = 100     # games played per greedy evaluation
MAX_GAMES = 15000       # total training games

# Create the environment
#env = gym.make('Taxi-v2')
env = gym.make("FrozenLake-v0")
obs = env.reset()
obs_length = env.observation_space.n
n_actions = env.action_space.n
reward_count = 0
games_count = 0

# Create and initialize the table with 0.0 (defaultdict returns 0.0 for unseen pairs)
table = collections.defaultdict(float)
test_rewards_list = []

# Reinitialize epsilon after each session
epsilon = 1.0

while games_count < MAX_GAMES:
    # Select the action following an ε-greedy policy
    action = select_eps_greedy_action(table, obs, n_actions)
    next_obs, reward, done, _ = env.step(action)
    # Update the Q-table
    Q_learning(table, obs, next_obs, reward, action)
    reward_count += reward
    obs = next_obs
    if done:
        # Decay ε once per finished game
        epsilon *= EPS_DECAY_RATE
        # Test the new table every 1k games
        if (games_count + 1) % 1000 == 0:
            test_reward = test_game(env, table)
            print('\tEp:', games_count, 'Test reward:', test_reward, np.round(epsilon,2))
            test_rewards_list.append(test_reward)
        obs = env.reset()
        reward_count = 0
        games_count += 1

# Plot the greedy-evaluation reward over the number of evaluations
plt.figure(figsize=(18,9))
plt.xlabel('Steps')
plt.ylabel('Accuracy')  # fixed label typo: was 'Accurracy'
plt.plot(test_rewards_list)
plt.show()
```
#### NB: in case you want to apply Q-learning to continuous state and actions games, you have to quantize the state and action spaces
| github_jupyter |
<img style="float: center;" src="images/CI_horizontal.png" width="600">
<center>
<span style="font-size: 1.5em;">
<a href='https://www.coleridgeinitiative.org'>Website</a>
</span>
</center>
Rayid Ghani, Frauke Kreuter, Julia Lane, Adrianne Bradford, Alex Engler, Nicolas Guetta Jeanrenaud, Graham Henke, Daniela Hochfellner, Clayton Hunter, Brian Kim, Avishek Kumar, Jonathan Morgan, Ridhima Sodhi, and Benjamin Feder.
_source to be updated when notebook added to GitHub_
# Record Linkage
----
# Table of Contents
JupyterLab contains a dynamic Table of Contents that can be accessed by clicking the last of the six icons on the left-hand sidebar.
## Introduction
This notebook will provide you with an introduction to record linkage using Python. Upon completion of this notebook, you will be able to apply record linkage techniques using the `recordlinkage` package to combine data from different sources in python. You will go through all the steps necessary for a successful record linkage starting with data preparation, which includes pre-processing, cleaning and standardizing data.
### Learning Objectives
We will explore different record linkage techniques in order to match the `tip_2017` table of WIC-approved vendors with `store_info_all`, which contains general vendor information.
## The Principles of Record Linkage
The goal of record linkage is to determine if pairs of records describe the same identity. For instance, this is important for removing duplicates from a data source or joining two separate data sources together. Record linkage also goes by the terms data matching, merge/purge, duplication detection, de-duping, reference matching, entity resolution, disambiguation, co-reference/anaphora in various fields.
There are several approaches to record linkage that include:
- exact matching
- rule-based linking
- probabilistic linking
- An example of **exact matching** is joining records based on social security number, exact name, or geographic code information. You have already done this in SQL when joining tables on a unique identifier.
- **Rule-based matching** involves applying a cascading set of rules that reflect the domain knowledge of the records being linked.
- In **probabilistic record linkages**, linkage weights are estimated to calculate the probability of a certain match.
In practical applications, you will need record linkage techniques to combine information addressing the same entity that is stored in different data sources. Record linkage will also help you to address the quality of varying data sources. For example, if one of your databases has missing values, you might be able to fill those by finding an identical pair in a different data source. Overall, the main applications of record linkage are:
1. Merging two or more data files
2. Identifying the intersection of the two data sets
3. Updating data files (with the data row of the other data files) and imputing missing data
4. Entity disambiguation and de-duplication
## Analytical Approach
For this notebook exercise, we are interested in vendor data.
- **Analytical Exercise**: Find WIC-approved stores in the vendor data supplied by IRI.
- **Data Availability**: We have names, addresses, and years for the various vendors in the two tables.
- **Approach**: We will look at the data available to us and clean & pre-process it to enable better linkage. Afterwards, we will use string matching techniques that are enabled by record linkage package in Python.
## Access the Data
Python provides us with some tools we can use for record linkages so we don't have to start from scratch and code our own linkage algorithms. Before we start, we need to load the package `recordlinkage`. We will be adding a few more packages than usual to our import process because the `recordlinkage` package has a few dependencies on other packages.
```
# general use imports
# NOTE(review): %pylab is deprecated in modern IPython in favor of
# `%matplotlib inline` plus explicit numpy/matplotlib imports — confirm the
# platform's IPython version before changing.
%pylab inline
import datetime
import numpy as np
import re

# pandas-related imports
import pandas as pd

# record linkage package
import recordlinkage as rl
from recordlinkage.preprocessing import clean

# database interaction imports
from pyathenajdbc import connect

print( "Imports loaded at " + str( datetime.datetime.now() ) )

# Open an Athena connection; query results are staged to the S3 bucket below.
conn = connect(s3_staging_dir = 's3://usda-iri-2019-queryresults/',
               region_name = 'us-gov-west-1',
               LogLevel = '0',
               workgroup = 'workgroup-iri_usda')
```
## Data Exploration
In our notebooks thus far, we have not utilized either `tip_2017` or `store_info_all`. To see what data manipulations we may have to perform, let's take a quick look at the two tables.
```
# Pull all 2017 rows from the general vendor-information table.
sql = '''
select *
from iri_usda.store_info_all
where year = '2017'
'''
df_total = pd.read_sql(sql, conn)

# Quick look at the data.
df_total.head()
df_total.describe()

# null values by column
df_total.isnull().sum()

# Count of empty-string or '.' placeholder values for all of the columns
df_total.isin(['', '.']).sum()
```
Although there are null values for some columns in `store_info_all` in 2017, they are not in any of the variables that we will be using for the record linkage. Therefore, we will not have to do any manipulation with null values for `store_info_all`. Let's see if we have any extra work for `tip_2017`.
```
sql = '''
select *
from iri_usda.tip_2017
'''
df_approved = pd.read_sql(sql, conn)
df_approved.head()
df_approved.describe()
# Output: count of null values for all of the columns
df_approved.isnull().sum()
df_approved.isin(['', '.']).sum()
```
Unfortunately, we were not as lucky for `df_approved`. Let's see what's wrong, i.e. when `vendor_street_number` and/or `vendor_street` are missing.
```
# when street number is missing
qry = '''
select *
from iri_usda.tip_2017
where vendor_street_number = ''
limit 5
'''
pd.read_sql(qry, conn)
# count when all three street identifiers are missing
qry = '''
select count(*)
from iri_usda.tip_2017
where vendor_street_number = '' and vendor_street = '' and vendor_additional_address = ''
'''
pd.read_sql(qry, conn)
```
Since `store_info_all` only has one column for the street number and address, we will have to concatenate `vendor_street_number` and `vendor_street`. However, we can also see that when `vendor_street_number` and `vendor_street` are null, there is a value for `vendor_additional_address`. Therefore, we will overwrite `df_approved` with a SQL query that concatenates `vendor_street_number` and `vendor_street` when they exist, and otherwise uses `vendor_additional_address`.
> In practice, it would be best to separate the street number and name into two separate categories as was described in the lecture. However, that would require additional code and since the purpose of this notebook is to get familiarized with various record linkage techniques, that process will not be covered in this notebook.
```
# rewrite df_approved with only columns needed for linkage:
# concatenate street number + street into a single `address` column,
# falling back to vendor_additional_address when both are empty
sql = '''
select vendor_name,
case when vendor_street_number = '' and vendor_street = '' then vendor_additional_address
else concat(vendor_street_number, ' ',vendor_street)
end as address,
vendor_city, vendor_state, vendor_zip
from iri_usda.tip_2017
'''
df_approved = pd.read_sql(sql, conn)
#confirm df_approved is what we want
df_approved.head()
```
## The Importance of Pre-Processing
Data pre-processing is an important step in a data analysis project in general, and in record linkage applications, it is particularly crucial. The goal of pre-processing is to transform messy data into a dataset that can be used in a project workflow.
Linking records from different data sources comes with different challenges that need to be addressed by the analyst. The analyst must determine whether or not two entities (individuals, businesses, geographical units) from two different files are the same. This determination is not always easy. In most cases there is no common uniquely identifying characteristic for an entity. For example, is Bob Miller from New York the same person as Bob Miller from Chicago in a given dataset? This determination has to be executed carefully because the consequences of wrong linkages may be substantial (i.e. Is person X the same person as the person X on the list of identified terrorists?). Pre-processing can help to make better informed decisions.
Pre-processing can be difficult because there are a lot of things to keep in mind. For example, data input errors, such as typos, misspellings, truncation, abbreviations, and missing values need to be corrected. Literature shows that pre-processing can improve matches. In some situations, 90% of the improvement in matching efficiency may be due to pre-processing. The most common reason why matching projects fail is the lack of time and resources for data cleaning.
In the following section, we will walk you through some pre-processing steps. Here we will touch upon practices that include but are not limited to removing spaces, parsing fields, and standardizing strings.
Let's look at the the most recurring store names in the two tables.
```
# Most frequent store names in each table (raw values, pre-cleaning).
df_total['store_name'].value_counts()
df_approved['vendor_name'].value_counts()
```
***Right away, we notice that the record linkage between the different datasets will not be straightforward. The variable is messy and non-standardized, similar names can be written differently (in upper-case or lower-case characters, with or without suffixes, etc.) The essential next step is to process the variables in order to make the linkage the most effective and relevant possible.***
### Parsing String Variables
By default, the split method returns a list of strings obtained by splitting the original string on spaces or commas, etc. The record linkage package comes with a built-in cleaning function we can also use. In addition, we can extract information from strings, for example by using regex search commands.
```
# Uppercasing the linkage variables into new *_clean columns so the raw
# columns are preserved for reference.
df_total['store_name_clean'] = df_total.store_name.str.upper()
df_total['address_clean'] = df_total.address.str.upper()
df_total['city_clean'] = df_total.city.str.upper()
df_total['state_clean'] = df_total.state.str.upper()
# Do same to df_approved
df_approved['vendor_name_clean'] = df_approved.vendor_name.str.upper()
df_approved['address_clean'] = df_approved.address.str.upper()
df_approved['vendor_city_clean'] = df_approved.vendor_city.str.upper()
df_approved['vendor_state_clean'] = df_approved.vendor_state.str.upper()
# see first five entries
df_approved.head()
# Cleaning names (using the record linkage package tool, see imports).
# clean() removes characters such as '-', '.', '/', '\', ':' and brackets.
# The same settings must be applied to every column of both tables so they
# are standardized identically; loop instead of repeating the call eight
# times with copy-pasted arguments.
for _col in ['store_name_clean', 'address_clean', 'city_clean', 'state_clean']:
    df_total[_col] = clean(df_total[_col], lowercase=False,
                           strip_accents='ascii', remove_brackets=False)
# Do same for df_approved
for _col in ['vendor_name_clean', 'address_clean',
             'vendor_city_clean', 'vendor_state_clean']:
    df_approved[_col] = clean(df_approved[_col], lowercase=False,
                              strip_accents='ascii', remove_brackets=False)
df_total.head()
df_approved.head()
```
### Regular Expressions – `regex`
Regular expressions (regex) are a way of searching for a character pattern. They can be used for matching or replacing operations in strings.
When defining a regular expression search pattern, it is a good idea to start out by writing down, explicitly, in plain English, what you are trying to search for and exactly how you identify when you've found a match.
For example, if we look at an author field formatted as "<last_name> , <first_name> <middle_name>", in plain English, this is how I would explain where to find the last name: "starting from the beginning of the line, take all the characters until you see a comma."
In a regular expression, there are special reserved characters and character classes. For example:
- "`^`" matches the beginning of the line or cell
- "`.`" matches any character
- "`+`" means one or more repetitions of the preceding expressions
Anything that is not a special character or class is just looked for explicitly. A comma, for example, is not a special character in regular expressions, so inserting "`,`" in a regular expression will simply match that character in the string.
In our example, in order to extract the last name, the resulting regular expression would be:
"`^.+,`". We start at the beginning of the line ( "`^`" ), matching any characters ( "`.+`" ) until we come to the literal character of a comma ( "`,`" ).
> _If you want to actually look for one of these reserved characters, it must be escaped. For example, if the expression looks for a literal period, rather than the special regular expression meaning of a period, precede it with a back slash ( "`\`" ) to escape the reserved character in a regular expression. For example, "`\.`" will match a "`.`" character in a string._
__REGEX CHEATSHEET__
- abc... Letters
- 123... Digits
- \d Any Digit
- \D Any non-Digit Character
- . Any Character
- \. Period
- [abc]	Only a, b or c
- [^abc]	Not a, b, or c
- [a-z] Characters a to z
- [0-9] Numbers 0 to 9
- \w any Alphanumeric chracter
- \W any non-Alphanumeric character
- {m} m Repetitions
- {m,n} m to n repetitions
- * Zero or more repetitions
- + One or more repetitions
- ? Optional Character
- \s any Whitespace
- \S any non-Whitespace character
- ^...$ Starts & Ends
- (...) Capture Group
- (a(bc)) Capture sub-Group
- (.*) Capture All
- (abc|def) Capture abc or def
__Examples:__
- `(\d\d|\D)` will match 22X, 23G, 56H, etc...
- `(\w)` will match any single alphanumeric character (0-9, a-z, A-Z, or _)
- `(\w{1,3})` will match any alphanumeric character of a length of 1 to 3.
- `(spell|spells)` will match spell or spells
- `(corpo?)` will match corp or corpo
- `(feb 2.)` will match feb 20, feb 21, feb 2a, etc.
__Using REGEX to match characters:__
In python, to use a regular expression to search for matches in a given string, we use the built-in "`re`" package ( https://docs.python.org/2/library/re.html ), specifically the "`re.search()`" method. To use "`re.search()`", pass it the regular expression you want to use to search enclosed in quotation marks, and then the string you want to search within.
__Using REGEX for replacing characters:__
The `re` package also has an "`re.sub()`" method used to replace regular expressions by other strings. The method can be applied to an entire pandas column (replacing expression1 with expression2) with the following syntax: `df['variable'].str.replace(r'expression1', 'expression2')`. Note the `r` before the first string to signal we are using regular expressions.
In this notebook, we will not have to use regex too much to pre-process our data tables, but in general, knowing regex is essential for cleaning data. Here, we will show you how you can use regex to extract everything inside quotation marks from addresses in `df_approved`.
> The quotation marks were already removed in the previous steps, but this example shows how you can do the same process using regular expressions.
```
# Extracting address inside quotations in df_approved
# Pattern1
df_approved['address'].str.extract('"(.*)"')
# Breaking the code down:
# .*  ---- matches any character 0 or more times between the quotation marks
# ()  ---- enclosing brackets capture that text into the new variable
# ""  ---- the literal quote characters anchor the match to the quoted text
```
Do you see any other possible standardizations? Insert them below!
```
```
Now we are done with the inital data prep work. Please keep in mind that we just provided some examples for you to demonstrate the process. You can add as many further steps to it as necessary.
## Record Linkage
The record linkage package is a quite powerful tool for you to use when you want to link records within a dataset or across multiple datasets. It comes with different built-in distance metrics and comparison functions; however, it also allows you to create your own. In general, record linkage is divided into several steps.
```
# Only keep variables relevant for linkage
df_total = df_total[['store_name_clean', 'zipcode', 'address_clean', 'city_clean', 'state_clean']]
df_approved = df_approved[['vendor_name_clean', 'vendor_zip', 'address_clean', 'vendor_city_clean',
                          'vendor_state_clean']]
#rename df_approved to match df_total for simplicity sake;
#after this both tables share identical column names, which the
#Compare steps below rely on
df_approved = df_approved.rename({'vendor_name_clean':'store_name_clean', 'vendor_zip':'zipcode',
                                  'vendor_city_clean':'city_clean','vendor_state_clean':'state_clean'},
                                 axis = 'columns')
df_total.head()
df_approved.head()
```
We've already done the pre-processing, so the next step is indexing the data we would like to link. Indexing allows you to create candidate links, which basically means identifying pairs of data rows which might refer to the same real world entity. This is also called the comparison space (matrix). There are different ways to index data. The easiest is to create a full index and consider every pair a match. This is also the least efficient method, because we will be comparing every row of one dataset with every row of the other dataset. Because of how extensive this process is, we will demonstrate it on just stores in New Mexico to limit its runtime.
```
# subset to just one state to demonstrate the `FullIndex()` method
# (a full index is the cartesian product of both tables, so it is only
# feasible on a small subset)
nm_total = df_total[df_total['state_clean'] == 'NM']
nm_approved = df_approved[df_approved['state_clean'] == 'NM']
# Let's generate a full index first (comparison table of all possible linkage combinations)
indexer = rl.index.Full()
pairs = indexer.index(nm_total, nm_approved)
# Returns a pandas MultiIndex object; its length is
# len(nm_total) * len(nm_approved)
print(len(pairs))
```
We can do better if we actually include our knowledge about the data to eliminate bad links from the start. This can be done through blocking. The `recordlinkage` package gives you multiple options for this. For example, you can block by using variables, which means that only links exactly equal on specified values will be kept. Here, we will block on `zipcode` so we only compare stores with the same zip code.
You can also use a neighborhood index in which the rows in your dataframe are ranked by some value and python will only link between the rows that are close by.
```
# Initialize the indexer.
indexer = rl.Index()
# Block on zipcode: only candidate pairs whose zip codes match exactly are
# kept, which shrinks the comparison space dramatically versus a full index.
indexer.block('zipcode')
# Returns a pandas MultiIndex object.
# Fix: the original called `indexerBL.index(...)`, but `indexerBL` was never
# defined anywhere in the notebook -- the blocking indexer built above is
# named `indexer`.
pairs2 = indexer.index(df_total, df_approved)
print(len(pairs2))
# Initiate compare object (we are using the blocked pairs here).
# You want to give python the name of the MultiIndex and the names of the datasets
compare = rl.Compare()
```
Now we have set up our comparison space. We can start to compare our files and see if we find matches. We will demonstrate an exact match and rule based approches using distance measures. Our goal is to create a dataframe for each record pair and if they are listed as a match using different linking methods. To do so, we will include the `label` argument to store the algorithms' outputs as different variables in our dataframe.
As for the different comparative measures we will cover, they include:
- Exact
- Levenshtein
- Jarowinkler
```
# Exact comparison:
# scores 1 when the two store names are exactly equal, 0 otherwise
# (similar in spirit to an equality JOIN).
compare.exact('store_name_clean','store_name_clean', label = 'store_name_clean')
# This command gives us the similarity between two strings based on the levenshtein distance.
# The measure is 0 if there are no similarities in the strings, 1 if they are identical.
compare.string('store_name_clean','store_name_clean', method='levenshtein', label = 'levenshtein_name')
# This command gives us the similarity between two strings based on the jarowinkler distance.
# The measure is 0 if there are no similarities in the strings, 1 if they are identical.
compare.string('store_name_clean','store_name_clean', method='jarowinkler', label = 'jarowinkler_name')
#compute levenshtein distance for addresses
compare.string('address_clean', 'address_clean', method = 'levenshtein', label = 'levenshtein_address')
#we want exact matches for city names
compare.exact('city_clean', 'city_clean', label='city_clean')
```
To actually compute the record pairs, we need to call the `compute` method. This will output exactly what we want to see: each potential pairing when blocked on `zipcode`, with corresponding values for our different comparative metrics we've used.
```
# compute record pairing scores: one row per candidate pair in pairs2,
# one column per comparison metric defined above
features = compare.compute(pairs2, df_total, df_approved)
features.head()
features.describe()
```
## Results
Once we have our comparison measures, we need to classify the measure in matches and non matches for non-exact pairings (Levenshtein and Jarowinkler). A rule-based approach would be to say if the similarity of our indicators is 0.85 or higher we consider this a match, everything else we won't match. This decision need to be made by the analyst. We're going to use .85 for all of our distance computations simply because it is considered to be a standard in the field.
> In practice, you should examine matches around the thresholds you are considering to make sure the threshold lines up with how you theoretically view a match. Different methods return different results, so ideally, you would want to enact different thresholds based on the different comparative algorithms you are employing.
```
# Impose threshold of .85: convert each string-similarity score into a
# binary match indicator (1 = similarity above the threshold).  The
# vectorized comparison replaces three copy-pasted list comprehensions.
for _col in ('levenshtein_name', 'jarowinkler_name', 'levenshtein_address'):
    features[_col] = (features[_col] > .85).astype(int)
features.head()
```
Now we need to decide which stores are the same between the two tables. Let's see the comparison results by seeing the distribution of scores (0-5 for the counts of 1 for the five measures).
```
# Distribution of per-pair totals: each pair scores 0-5, one point per
# metric that flagged it as a match.
features.sum(axis=1).value_counts().sort_index(ascending=False)
```
Arbitrarily, let's say that all comparisons with at least 3 metrics scored as a 1 are matches. Let's subset to just those matches.
```
# Keep only pairs where at least 3 of the 5 metrics agree on a match.
features[features.sum(axis=1) >= 3]
```
Another way of classifying records is the Fellegi-Sunter method. If Fellegi-Sunter is used to classify record pairs, you would follow all the steps we have done so far. However, now, we would estimate probabilities to construct weights. These weights will then be applied during the classification to give certain characteristics more importance. For example, we are more certain that very unique names are a match than Bob Millers.
#### Fellegi Sunter
```
# let's assume the rows above are our matches
matches = features[features.sum(axis=1) >= 3]
len(features[features.sum(axis=1) >= 3])
## Generate Training Data and index
ml_pairs = matches[0:4000]
# Candidate pairs (from the blocked index) that are also training matches.
# Index.intersection() replaces the `&` set operator on pandas indexes,
# which was deprecated and later removed.
ml_matches_index = ml_pairs.index.intersection(pairs2)
```
The Naive Bayes classifier is a probabilistic classifier. The probabilistic record linkage framework by Fellegi and Sunter (1969) is the most well-known probabilistic classification method for record linkage. Later, it was proved that the Fellegi and Sunter method is mathematically equivalent to the Naive Bayes method in case of assuming independence between comparison variables.
```
## Train the classifier on the comparison vectors, using the rule-based
## matches above as (pseudo-)labels
nb = rl.NaiveBayesClassifier()
nb.fit(ml_pairs, ml_matches_index)
## Predict the match status for all record pairs
result_nb = nb.predict(matches)
result_nb
```
### Evaluation
The last step is to evaluate the results of the record linkage. We will cover this in more detail in the machine learning session. This is just for completeness.
```
## Confusion matrix of predicted vs. (pseudo-)true match status
conf_nb = rl.confusion_matrix(ml_pairs, result_nb, len(matches))
conf_nb
## Precision and Accuracy
precision = rl.precision(conf_nb)
accuracy = rl.accuracy(conf_nb)
## Print the metrics
print(precision)
print(accuracy)
## The F-score for this classification is
rl.fscore(conf_nb)
```
## References and Further Readings
### Parsing
* Python online documentation: https://docs.python.org/2/library/string.html#deprecated-string-functions
* Python 2.7 Tutorial(Splitting and Joining Strings): http://www.pitt.edu/~naraehan/python2/split_join.html
### Regular Expression
* Python documentation: https://docs.python.org/2/library/re.html#regular-expression-syntax
* Online regular expression tester (good for learning): http://regex101.com/
### String Comparators
* GitHub page of jellyfish: https://github.com/jamesturk/jellyfish
* Different distances that measure the differences between strings:
- Levenshtein distance: https://en.wikipedia.org/wiki/Levenshtein_distance
- Damerau–Levenshtein distance: https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
- Jaro–Winkler distance: https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance
- Hamming distance: https://en.wikipedia.org/wiki/Hamming_distance
- Match rating approach: https://en.wikipedia.org/wiki/Match_rating_approach
### Fellegi-Sunter Record Linkage
* Introduction to Probabilistic Record Linkage: http://www.bristol.ac.uk/media-library/sites/cmm/migrated/documents/problinkage.pdf
* Paper Review: https://www.cs.umd.edu/class/spring2012/cmsc828L/Papers/HerzogEtWires10.pdf
| github_jupyter |
# Object oriented programming
# Lab 03
## February 23, 2018
## 1. Basic exercises
### 1.1 Define a class named A with a contructor that takes a single parameter and stores it in an attribute named `value`. Add a `print_value` method to the class.
Instantiate the class and call the `print_value` method.
### 1.2 Redefine the class's __init__ so that it can be instantiated without a parameter. If it is called without a parameter, value should be 42.
### 1.3 Define a class named B, whose __init__ takes two parameters and stores one in a public attribute and the other in a private attribute named this_is_public and __this_is_private respectively.
Check the class's __dict__ attribute and find out the mangled name of the private attribute.
```
# Exercise check: the public attribute is reachable directly, while the
# "private" one is name-mangled (to _B__this_is_private), so accessing it
# by its plain name raises AttributeError.
b = B(1, 2)
assert b.this_is_public == 1
try:
    b.__this_is_private
    print("This should not happen.")
except AttributeError:
    print("Failed to access private attribute, this is good :)")
```
## 2. Inheritance
### 2.1 Guess the output without running the cell.
```
# Quiz hierarchy: object <- A <- B <- D, plus a sibling branch A <- C.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B): pass
a = A()
b = B()
c = C()
d = D()
# isinstance checks the instance's class and all of its ancestors.
print(isinstance(a, object))
print(isinstance(b, object))
print(isinstance(a, B))
print(isinstance(b, A))
print(isinstance(d, A))
# issubclass walks the class hierarchy itself; every class is considered
# a subclass of itself.
print(issubclass(C, object))
print(issubclass(D, B))
print(issubclass(B, D))
print(issubclass(B, B))
```
### 2.2 Create a Cat, a Dog, a Fish and a Eagle class.
The animals have the following attributes:
1. cats, dogs and eagles can make a sound (this should be a make_sound function that prints the animals sound),
2. all animals have an age and a number_of_legs attribute,
3. cats and dogs have a fur_color attribute. They can be instantiated with a single color or a list or tuple of colors.
Use inheritance and avoid repeating code. Use default values in the constructors.
```
# Exercise driver: constructors must accept a name plus keyword defaults;
# fur may be a single color string or a tuple of colors.
cat = Cat("Fluffy", age=3, fur="white")
dog = Dog("Cherry", age=1, fur=("white", "brown", "black"))
fish = Fish("King")
eagle = Eagle("Bruce", age=2)
animals = [cat, dog, fish, eagle]
```
Iterate over the list animals and call make_sound for each animal. Print either the sound the animal makes or "XY does not make a sound" if the animal does not make a sound (fish). This is an example of duck typing.
## 3. `RationalNumber` class
Write a class that represents a rational number. A number is rational if it can be expressed as the quotient of two integers (p and q). Define the operators seen in the tests below.
Make sure that p and q are always relative primes (you can use `math.gcd`).
```
from math import gcd
# Skeleton to complete: store p/q reduced by gcd and implement the
# operators exercised below (__add__, __mul__, __truediv__, __eq__).
class RationalNumber(object):
    # TODO
    # NOTE(review): a comment alone is not a valid class body -- add
    # `pass` (or the real implementation) before running this cell.
r = RationalNumber(43, 2)
assert r + r == RationalNumber(43) # q = 1 in this case
assert r * 2 == r + r
r1 = RationalNumber(3, 2)
r2 = RationalNumber(4, 3)
assert r1 * r2 == RationalNumber(12, 6)
assert r1 / r2 == RationalNumber(9, 8)
assert r1 == RationalNumber(6, 4)
```
### RationalNumber advanced exercises
Make the class usable as a dictionary key.
```
# Equal rationals must hash equally: r1 == r2 (3/1 is 3), so the dict
# collapses them into one key, leaving two entries in total.
r1 = RationalNumber(3)
r2 = RationalNumber(3, 1)
r3 = RationalNumber(3, 2)
d = {r1: 1, r2: 2, r3: 12}
assert(len(d) == 2)
```
`p` and `q` can only be integers. Raise a `RationalNumberValueError` if someone tries to set them to anything else.
```
# Both invalid assignments must raise the exercise's dedicated
# RationalNumberValueError.  The original second check caught the broader
# built-in ValueError, which is inconsistent with the first check and with
# the exercise text; catch the same exception in both.
try:
    r1.p = 3.4
except RationalNumberValueError:
    print("This should happen")
else:
    print("This shouldn't happen")
try:
    r1.q = 3.4
except RationalNumberValueError:
    print("This should happen")
else:
    print("This shouldn't happen")
```
Rational numbers may be negative. Make sure that `q` is never negative.
```
# Sign normalization: q must stay positive, the sign lives on p.
r = RationalNumber(3, -2)
assert r.p == -3 and r.q == 2
assert abs(r) == 1.5
```
Add a `from_str` factory method which parses the following formats:
```
# from_str must tolerate a sign on either operand and optional spaces
# around the slash.
r = RationalNumber(-3, 2)
assert RationalNumber.from_str("-3/2") == r
assert RationalNumber.from_str("3/-2") == r
assert RationalNumber.from_str("3 / -2") == r
```
| github_jupyter |
```
# Notebook magics: auto-reload edited modules and render plots inline.
%load_ext autoreload
%autoreload 2
%matplotlib inline
import sys
import casadi as ca
import os
import matplotlib.pyplot as plt
# Make the in-repo pymoca sources importable ahead of any installed copy.
sys.path.insert(0, '../../src')
from pymoca.backends.xml import model, sim_scipy, analysis
from pymoca.backends.xml import parser as parse_xml
from pymoca.backends.xml.generator import generate as generate_xml
from pymoca.parser import parse as parse_mo
```
## Bouncing Ball
This example:
1. Reads a ModelicaXML file
2. Parses it and creates an Casadi HybridDAE model.
3. Converts the HybridDAE model to a HybridODE model.
4. Simulates it using scipy.integration
```
# Parse the ModelicaXML bouncing-ball model into a Casadi hybrid DAE.
m1_xml = os.path.join(
    '..', 'models', 'bouncing-ball.xml')
m1_ca = parse_xml.parse_file(m1_xml)
m1_ca
# Convert the hybrid DAE to a hybrid ODE for simulation.
m1_ode = m1_ca.to_ode()
m1_ode
# Initial condition for state x (presumably the ball height -- TODO confirm
# against the model file).
m1_ode.prop['x']['start'] = 1
# Simulate 3.5 s at dt=0.01 with scipy's dopri5 integrator.
data1 = sim_scipy.sim(m1_ode, {'dt': 0.01, 'tf': 3.5, 'integrator': 'dopri5'})
plt.figure(figsize=(15, 10))
analysis.plot(data1, marker='.', linewidth=1, markersize=5)
```
## Simple Circuit
This example:
1. Reads a Modelica file
2. Converts the Modelica file to ModelicaXML
3. Parses the ModelicaXML file and creates a Casadi HybridDAE model.
3. Converts the HybridDAE model to a HybridODE model.
4. Simulates it using scipy.integration
```
# Parse the Modelica source, generate ModelicaXML from it, then build the
# Casadi hybrid DAE from that XML.
simple_circuit_file = os.path.join(
    '..', 'models', 'SimpleCircuit.mo')
m2_mo = parse_mo(open(simple_circuit_file, 'r').read())
m2_xml = generate_xml(m2_mo, 'SimpleCircuit')
m2_ca = parse_xml.parse(m2_xml)
m2_ca
# Convert the hybrid DAE to a hybrid ODE for simulation.
m2_ode = m2_ca.to_ode()
m2_ode
m2_ode.prop.keys()
# Set the AC source parameters: frequency 60 and amplitude 110
# (presumably Hz and volts -- TODO confirm against the model).
m2_ode.prop['AC.f']['value'] = 60
m2_ode.prop['AC.VA']['value'] = 110
# Simulate 0.5 s at dt=0.001 with scipy's dopri5 integrator.
data2 = sim_scipy.sim(m2_ode, {'dt': 0.001, 'tf': 0.5, 'integrator': 'dopri5'})
plt.figure(figsize=(15, 10))
analysis.plot(data2, fields=['x'])
```
## Noise Simulation
This example demonstrates using the string based parser and also noise simulation.
```
model_txt = """
model Simple
Real x(start=0);
discrete Real y;
discrete Real v;
discrete Real time_last(start=0);
equation
der(x) = v;
when (abs(x) >= 2) then
reinit(x, 0);
end when;
when (time - time_last > 0.1) then
v = 1 + noise_gaussian(0, 0.1);
y = x + noise_gaussian(0, 0.1);
time_last = time;
end when;
end Simple;
"""
m3_mo = parse_mo(model_txt)
m3_xml = generate_xml(m3_mo, 'Simple')
print(m3_xml)
m3_ca = parse_xml.parse(m3_xml)
m3_ca
m3_ode = m3_ca.to_ode()
m3_ode
m3_ode.prop['x']['start'] = 0
data = sim_scipy.sim(m3_ode, {'dt': 0.01, 'tf': 3})
plt.figure(figsize=(15, 10))
analysis.plot(data, fields=['m', 'x', 'y'])
```
| github_jupyter |
# Interactive Widget: Front End Code: Bagging Classifier
This is our official final version of the widget.
Throughout this workbook, we used steps from the following web pages to inform our widgets.
- https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Basics.html
- https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html
- https://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html
### Set Up
```
# Python 2/3 print compatibility; a __future__ import must precede all
# other statements, so it comes first.
from __future__ import print_function

# Import the necessary data libraries.
import numpy as np
import pandas as pd
import scipy.stats as stats

# Modeling: train/test split helper and the bagging ensemble classifier.
from sklearn.model_selection import train_test_split as tts
from sklearn.ensemble import BaggingClassifier
# The following are for Classification Accuracy.
from sklearn import metrics

# The following are for Jupyter Widgets.
# (The original imported `ipywidgets as widgets` twice; the duplicate
# line has been removed.)
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
from ipywidgets import FloatSlider
from IPython.display import display
```
Although the URLs say they were for the K-Neighbors widget, we did not change anything in the resampling or testing data, so we will still use these files.
```
# Set up datasets.
X_resampled_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/interim/X_resampled_forKNeighborWidget.csv'
X_resampled = pd.read_csv(X_resampled_url, sep = ',', engine = 'python')
y_resampled_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/interim/y_resampled_forKNeighborWidget.csv'
y_resampled = pd.read_csv(y_resampled_url, sep = ',', engine = 'python')
X_test_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/interim/X_test_forKNeighborWidget.csv'
X_test = pd.read_csv(X_test_url, sep = ',', engine = 'python')
y_test_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/interim/y_test_forKNeighborWidget.csv'
y_test = pd.read_csv(y_test_url, sep = ',', engine = 'python')
```
We know from testing the type of `y_resampled` in `InteractiveWidget_BackEnd.ipynb` that `y_resampled` and `y_test` need to be a series in order for our model to run correctly. We also know from this site (https://datatofish.com/pandas-dataframe-to-series/) how to change a dataframe into a series.
```
# Change the y_resampled dataframe into a y_resampled series.
# (.squeeze() collapses a single-column DataFrame into a Series.)
y_resampled = y_resampled.squeeze()
# Change the y_test dataframe into a y_test series.
y_test = y_test.squeeze()
```
### Create the Modeling Functions
```
# Create the prediction helper used by the widget.
def widgetpred(X_resampled, y_resampled, X_test, y_test, input_test, estimator, **kwargs):
    """
    Fit *estimator* on the resampled training data and predict twice.

    Parameters
    ----------
    X_resampled, y_resampled : training features and labels.
    X_test : held-out features scored for the confusion matrix.
    y_test : held-out labels; unused here, kept so existing positional
        callers keep working.
    input_test : the single user-supplied observation from the widget.
    estimator : any object exposing scikit-learn's fit/predict API.
    **kwargs : forwarded to ``estimator.fit``.

    Returns
    -------
    list : ``[predicted, inputpred]`` -- the test-set predictions and the
    prediction for ``input_test``.
    """
    estimator.fit(X_resampled, y_resampled, **kwargs)
    # (The original kept an unused `expected = y_test` alias; removed.)
    predicted = estimator.predict(X_test)
    inputpred = estimator.predict(input_test)
    return [predicted, inputpred]
# Create the function conmatrix.
def conmatrix(y_test, predicted, inputpred):
    """
    Compute the confusion matrix and report prediction reliability.

    Prints how often the model is correct for whichever outcome
    (finish / did-not-finish) matches the widget's prediction.
    """
    # sklearn lays out the binary confusion matrix as [[TN, FP], [FN, TP]],
    # so a row-major ravel unpacks it in that order.
    tn, fp, fn, tp = metrics.confusion_matrix(y_test, predicted).ravel()

    # Precision of positive ("will finish") predictions, as a percentage.
    pos_precision = round(tp / (tp + fp) * 100, 2)
    # Precision of negative ("will not finish") predictions.
    neg_precision = round(tn / (tn + fn) * 100, 2)

    # Report the rate that matches the widget's own prediction.
    if inputpred == 1:
        print("When our model predicts that a car will finish the race, it is correct", pos_precision, "% of the time.")
    else:
        print("When our model predicts that a car will not finish the race, it is correct", neg_precision, "% of the time.")
```
### Create the Widget
```
"""
Establish function "predict" which allows selection of two track types, whether
the track is historic or not, and how popular the circuit is,
as well as the input of one of each of the following values:
year, grid, alt, average_lap_time, minimum_lap_time, PRCP, TAVG.
Place these values in the dataframe input_df and display the dataframe.
Create prediction based on widgetpred function and display the prediction:
0 for did not finish, 1 for did finish.
"""
def predictfinish(trackType, historic, circuit, year, grid, alt, average_lap_time, normalized_minLapTime, PRCP, TAVG):
# Use an if-else statement to determine the output based on the input track.
if trackType == "Race":
trackType = 0
else:
trackType = 1
# Use an if-else statement to determine the output based on the input historic.
if historic == "Not Historic":
isHistoric = 0
else:
isHistoric = 1
# Use an if-else statement to determine the output based on the input circuit.
if circuit == "Used 500+ times":
oneHot_circuits_1 = 1
oneHot_circuits_2 = 0
oneHot_circuits_3 = 0
oneHot_circuits_4 = 0
oneHot_circuits_5 = 0
oneHot_circuits_6 = 0
elif circuit == "Used 400-499 times":
oneHot_circuits_1 = 0
oneHot_circuits_2 = 1
oneHot_circuits_3 = 0
oneHot_circuits_4 = 0
oneHot_circuits_5 = 0
oneHot_circuits_6 = 0
elif circuit == "Used 300-399 times":
oneHot_circuits_1 = 0
oneHot_circuits_2 = 0
oneHot_circuits_3 = 1
oneHot_circuits_4 = 0
oneHot_circuits_5 = 0
oneHot_circuits_6 = 0
elif circuit == "Used 200-299 times":
oneHot_circuits_1 = 0
oneHot_circuits_2 = 0
oneHot_circuits_3 = 0
oneHot_circuits_4 = 1
oneHot_circuits_5 = 0
oneHot_circuits_6 = 0
elif circuit == "Used 100-199 times":
oneHot_circuits_1 = 0
oneHot_circuits_2 = 0
oneHot_circuits_3 = 0
oneHot_circuits_4 = 0
oneHot_circuits_5 = 1
oneHot_circuits_6 = 0
elif circuit == "Used less than 100 times":
oneHot_circuits_1 = 0
oneHot_circuits_2 = 0
oneHot_circuits_3 = 0
oneHot_circuits_4 = 0
oneHot_circuits_5 = 0
oneHot_circuits_6 = 1
# Transform average_lap_time.
normalized_avgLapTime = np.log(average_lap_time)
# Use an if-else statement to move any potential outliers from average_lap_time.
avgQ1 = -0.019303
avgQ3 = 0.006690
avgIQR = avgQ3 - avgQ1
avglowertail = avgQ1 - 2.5 * avgIQR
avguppertail = avgQ3 + 2.5 * avgIQR
avgmedian = -0.005962837883204569
if normalized_avgLapTime > avguppertail or normalized_avgLapTime < avglowertail:
normalized_avgLapTime = avgmedian
# Use an if-else statement to move any potential outliers from normalized_minLapTime.
minQ1 = 0.984717
minQ3 = 1.006281
minIQR = minQ3 - minQ1
minlowertail = minQ1 - 2.0 * minIQR
minuppertail = minQ3 + 2.0 * minIQR
minmedian = 0.995628475361378
if normalized_minLapTime > minuppertail or normalized_minLapTime < minlowertail:
normalized_minLapTime = minmedian
# Transform altitude.
alt_trans = np.log(alt + 1 - (-7))
# Transform precipitation.
PRCP_trans = np.log(PRCP + 1)
# Establish the data of our input_df dataframe.
inputdata = [[grid, trackType, year, TAVG, isHistoric, oneHot_circuits_1, oneHot_circuits_2,
oneHot_circuits_3, oneHot_circuits_4, oneHot_circuits_5, oneHot_circuits_6, alt_trans,
PRCP_trans, normalized_minLapTime, normalized_avgLapTime]]
# Establish the dataframe input_df itself with pd.DataFrame.
input_df = pd.DataFrame(inputdata, columns =
['grid', 'trackType', 'year', 'TAVG',
'isHistoric', 'oneHot_circuits_1', 'oneHot_circuits_2',
'oneHot_circuits_3', 'oneHot_circuits_4', 'oneHot_circuits_5',
'oneHot_circuits_6', 'alt_trans', 'PRCP_trans', 'normalized_minLapTime',
'normalized_avgLapTime'])
display(input_df)
# Using the widgetpred function, predict whether the car will finish the race or not given input_df.
pred = widgetpred(X_resampled, y_resampled, X_test, y_test, input_df, BaggingClassifier())
# Using an if-else statement, determine what interactors will see given the data they input.
if pred[1] == 1:
writtenpred = "finish the race."
else:
writtenpred = "not finish the race."
# Print the model's prediction.
print("According to our Bagging Classifier model, your car is predicted to", writtenpred)
"""
Using the conmatrix function, print out a statement about
the true positive or negative rate, depending on the prediction.
"""
conmatrix(y_test, pred[0], pred[1])
# Create a widget that will interact with the predictfinish function.
# Dropdowns cover the categorical inputs; sliders and a bounded text box
# cover the numeric ones.  Ranges mirror the training data (e.g. seasons
# 1996-2021, grid positions 0-30).
interact(predictfinish, trackType = widgets.Dropdown(options = ["Race", "Street"], value = "Race", description = 'Track Type'),
         historic = widgets.Dropdown(options = ["Not Historic", "Historic"], value = "Not Historic", description = 'Historic?'),
         circuit = widgets.Dropdown(options = ["Used 500+ times", "Used 400-499 times", "Used 300-399 times", "Used 200-299 times", "Used 100-199 times", "Used less than 100 times"], value = "Used less than 100 times", description = 'Circuit'),
         year = widgets.IntSlider(min = 1996, max = 2021, description = 'Year', disabled = False, continuous_update = False),
         grid = widgets.IntSlider(min = 0, max = 30, description = 'Grid', disabled = False, continuous_update = False),
         alt = widgets.BoundedFloatText(min = -100, max = 2500, description = 'Altitude', disabled = False, continuous_update = False),
         average_lap_time = widgets.FloatSlider(min = 0.1, max = 6.0, value = 0.1, description = 'Avg Lap Time', disabled = False, continuous_update = False),
         normalized_minLapTime = widgets.FloatSlider(min = 0.1, max = 6.0, value = 0.1, description = 'Min Lap Time', disabled = False, continuous_update = False),
         PRCP = widgets.FloatSlider(min = 0, max = 10, description = 'Precipitation', disabled = False, continuous_update = False),
         TAVG = widgets.FloatSlider(min = 0, max = 110, description = 'Avg Temp (F)', disabled = False, continuous_update = False));
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import re  # regular expressions, used by the tweet-cleaning cells below

# Load the scraped-tweets dataset and drop the stray positional index column
# the scraper wrote out.
df = pd.read_csv('WeNoGetPresident.csv')
del df['Unnamed: 0']  # delete leftover index column

# Give the raw Twitter-API column names friendlier labels.
df.rename(columns={"created_at": "Time_Posted",
                   "text": "Tweet",
                   "source": "Tweet_Source",
                   "description": "Bio"}, inplace=True)
df.head(5)

# BUG FIX: astype() returns a new Series, so the original
# `df['Tweet'].astype('str')` (written twice, unassigned) never changed the
# frame.  Assign the result back so the column really is string-typed.
df['Tweet'] = df['Tweet'].astype('str')
df.dtypes
```
Clean Tweet
```
def remove_pattern(input_txt, pattern):
    """Return *input_txt* with every match of *pattern* removed.

    Parameters
    ----------
    input_txt : str
        Text to clean (e.g. one tweet).
    pattern : str
        Regular expression whose matches are stripped out.

    Notes
    -----
    The original implementation findall'ed the matches and then fed each
    matched *string* back into re.sub as a pattern.  That is quadratic and
    unsafe: a match containing regex metacharacters ('.', '+', '(' ...) would
    be re-interpreted as a pattern and could remove the wrong text or raise
    re.error.  A single re.sub of the original pattern is equivalent for
    well-behaved matches and correct for all of them.
    """
    return re.sub(pattern, '', input_txt)
# Strip user mentions: np.vectorize applies remove_pattern element-wise.
df['tidy_tweet'] = np.vectorize(remove_pattern)(df['Tweet'], "@[\w]*") # remove @mentions from each tweet
# remove special characters, numbers, punctuations (keep letters and '#')
df['tidy_tweet'] = df['tidy_tweet'].str.replace("[^a-zA-Z#]", " ")
df['tidy_tweet'] = df['tidy_tweet'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3])) # keep only words longer than 3 characters
del df['Tweet'] # delete raw tweet column (superseded by tidy_tweet)
del df['Bio'] # delete bio column which is not necessary
df['tidy_tweet'] = df.tidy_tweet.str.lower() # Convert string to lower case
```
Cleaning tweet source
```
# remove special characters, numbers, punctuations from tweet source
# (the source field is an HTML anchor tag, e.g. '<a href=...>Twitter for iPhone</a>')
df['tidy_Tweet_Source'] = df['Tweet_Source'].str.replace("[^a-zA-Z#]", " ")
del df['Tweet_Source'] # delete raw source column (now redundant)
```
Remove the leading and trailing strings so that only the device each tweet was sent from remains
```
# NOTE(review): str.rstrip / str.lstrip take a *set of characters*, not a
# prefix/suffix string -- every character appearing anywhere in the argument
# is stripped from the end/start.  The giant lstrip argument below therefore
# strips any leading run of those characters, which only coincidentally
# isolates the device name; str.removeprefix or a regex extraction would be
# the robust fix.  TODO confirm the resulting `tweet_source` values are sane.
df['final_1'] = df['tidy_Tweet_Source'].str.rstrip(' a')
df['tweet_source'] = df['final_1'].str.lstrip('hootsuite com rel nofollow atchnigeria com rel nofollow a href http twitter com download android rel nofollow Twitter for download iphone rel nofollow Twitter for bile twitter com rel nofollow Twitter # download ipad rel nofollow Twitter for ze wp com rel nofollow v rel nofollowk rel nofollow ks assure ng rel nofollow ' )
# Drop the intermediate columns and the timestamp we no longer need.
del df['tidy_Tweet_Source']
del df['final_1']
del df['Time_Posted']
df.head(5)
```
Clean Location column
```
# remove special characters, numbers, punctuations from the free-text location
df['tidy_Location'] = df['location'].str.replace("[^a-zA-Z#]", " ")
df['location'].value_counts()
df.head(5)
del df['location']
df.tweet_source.value_counts()
# Normalise the device names produced by the earlier lstrip step.
df['tweet_source'].replace({'Phone': 'Iphone',
                            'Pad': 'Ipad'},
                           inplace = True)
# display
df
# BUG FIX: the original `df.tidy_Location.dropna(inplace=True)` operated on a
# detached column Series and could never remove rows from df, and
# `.replace({'NaN': 'empty'})` only matched the literal string 'NaN', not real
# missing values.  Filling genuine NaNs with the 'empty' placeholder is what
# was intended.
df['tidy_Location'] = df['tidy_Location'].fillna('empty')
df.to_csv("WeNoGetPresido.csv", index=False)
```
| github_jupyter |
# Using an SBML model
## Getting started
### Installing libraries
Before you start, you will need to install a couple of libraries:
The [ModelSeedDatabase](https://github.com/ModelSEED/ModelSEEDDatabase) has all the biochemistry we'll need. You can install that with `git clone`.
The [PyFBA](http://linsalrob.github.io/PyFBA) library has detailed [installation instructions](http://linsalrob.github.io/PyFBA/installation.html). Don't be scared, it's mostly just `pip install`.
(Optional) Also, get the [SEED Servers](https://github.com/linsalrob/SEED_Servers_Python) as you can get a lot of information from them. You can install the git python repo from github. Make sure that the SEED_Servers_Python is in your PYTHONPATH.
We start with importing some modules that we are going to use.
We import *sys* so that we can use standard out and standard error if we have some error messages.<br>
We import *copy* so that we can make a deep copy of data structures for later comparisons.<br>
Then we import the *PyFBA* module to get started.
```
import sys
import os
import copy
import PyFBA
import pickle
```
## Sharing the data
If you set this variable to true, we will export some of the data, as either `txt` files or `pickle` files, and then you can import them into other notebooks to explore the data
```
# Toggle: when True, intermediate model components are exported (txt/pickle)
# so other notebooks can load them without re-parsing the SBML.
share_data = True
```
## Running an SBML model
If you have run your genome through RAST, you can download the [SBML](http://www.sbml.org/) model and use that directly.
We have provided an [SBML model of *Citrobacter sedlakii*](https://raw.githubusercontent.com/linsalrob/PyFBA/master/example_data/Citrobacter/Citrobacter_sedlakii.sbml) that you can download and use. You can right-ctrl click on this link and save the SBML file in the same location you are running this iPython notebook.
We use this SBML model to demonstrate the key points of the FBA approach: defining the reactions, including the boundary, or drainflux, reactions; the compounds, including the drain compounds; the media; and the reaction bounds.
We'll take it step by step!
We start by parsing the model:
```
# Parse the downloaded SBML model of Citrobacter sedlakii into a PyFBA model
# object holding its reactions and compounds.
sbml = PyFBA.parse.parse_sbml_file("../example_data/Citrobacter/Citrobacter_sedlakii.sbml")
```
### Find all the reactions and identify those that are boundary reactions
We need a set of reactions to run in the model. In this case, we are going to run all the reactions in our SBML file. However, you can change this set if you want to knock out reactions, add reactions, or generally modify the model. We store those in the `reactions_to_run` set.
The boundary reactions refer to compounds that are secreted but then need to be removed from the `reactions_to_run` set. We usually include a consumption of those compounds that is open ended, as if they are draining away. We store those reactions in the `uptake_secretion_reactions` dictionary.
```
# Get a dict of reactions.
# The key is the reaction ID, and the value is a metabolism.reaction.Reaction object
reactions = sbml.reactions
# Partition the reactions three ways: the biomass (objective) equation,
# uptake/secretion (boundary) reactions, and everything else, which is what
# the FBA will actually run.
reactions_to_run = set()
uptake_secretion_reactions = {}
biomass_equation = None
for r in reactions:
    if 'biomass_equation' == r:
        # The objective function is kept separate from the matrix.
        biomass_equation = reactions[r]
        print(f"Our biomass equation is {biomass_equation.readable_name}")
        continue
    # A reaction is a boundary reaction if any compound it touches is an
    # uptake/secretion compound.
    is_boundary = False
    for c in reactions[r].all_compounds():
        if c.uptake_secretion:
            is_boundary = True
            break
    if is_boundary:
        # Mark it and keep it out of reactions_to_run.
        reactions[r].is_uptake_secretion = True
        uptake_secretion_reactions[r] = reactions[r]
    else:
        reactions_to_run.add(r)
```
At this point, we can take a look at how many reactions are in the model, not counting the biomass reaction:
```
# Summarise the partition sizes before building the matrix.
print(f"The biomass equation is {biomass_equation}")
print(f"There are {len(reactions)} reactions in the model")
print(f"There are {len(uptake_secretion_reactions)} uptake/secretion reactions in the model")
print(f"There are {len(reactions_to_run)} reactions to be run in the model")
# Optionally export the reaction IDs (one per line) for other notebooks.
if share_data:
    with open('sbml_reactions.txt', 'w') as out:
        for r in reactions:
            out.write(f"{r}\n")
```
### Find all the compounds in the model, and filter out those that are secreted
We need to filter out uptake and secretion compounds from our list of all compounds before we can make a stoichiometric matrix.
```
# Every compound in the model, including uptake/secretion (boundary) ones.
all_compounds = sbml.compounds
# Keep only the internal compounds: boundary compounds must be excluded
# before the stoichiometric matrix is built.
filtered_compounds = {cpd for cpd in all_compounds if not cpd.uptake_secretion}
```
Again, we can see how many compounds there are in the model.
```
# Report how many compounds survive the boundary filter.
print(f"There are {len(all_compounds)} total compounds in the model")
print(f"There are {len(filtered_compounds)} compounds that are not involved in uptake and secretion")
```
And now we have the size of our stoichiometric matrix! Notice that the stoichiometric matrix is composed of the reactions that we are going to run and the compounds that are in those reactions (but not the uptake/secretion reactions and compounds).
```
# The matrix dimensions: reactions to run x internal compounds.
print(f"The stoichiometric matrix will be {len(reactions_to_run):,} reactions by {len(filtered_compounds):,} compounds")
```
### Read the media file, and correct the media names
In our [media](https://github.com/linsalrob/PyFBA/tree/master/media) directory, we have a lot of different media formulations, most of which we use with the Genotype-Phenotype project. For this example, we are going to use Lysogeny Broth (LB). There are many different formulations of LB, but we have included the recipe created by the folks at Argonne so that it is comparable with their analysis. You can download [ArgonneLB.txt](https://raw.githubusercontent.com/linsalrob/PyFBA/master/media/ArgonneLB.txt) and put it in the same directory as this iPython notebook to run it.
Once we have read the file we need to correct the names in the compounds. Sometimes when compound names are exported to the SBML file they are modified slightly. This just corrects those names.
```
# Read the media file
#media = PyFBA.parse.read_media_file("/home/redwards/.local/lib/python3.9/site-packages/PyFBA-2.1-py3.9.egg/PyFBA/Biochemistry/media/ArgonneLB.txt")
# Alternative media formulations, kept commented for easy switching:
# mediafile = "MOPS_NoC_L-Methionine"
mediafile = 'ArgonneLB'
# ediafile = 'MOPS_NoC_D-Glucose'
media = PyFBA.parse.pyfba_media(mediafile)
# Correct the names: compound names are sometimes mangled on SBML export, so
# map them back to the names used inside this model.
media = sbml.correct_media(media)
print(f"The media has {len(media)} compounds")
```
### Set the reaction bounds for uptake/secretion compounds
The uptake and secretion compounds typically have reaction bounds that allow them to be consumed (i.e. diffuse away from the cell) but not produced. However, our media components can also increase in concentration (i.e. diffuse to the cell) and thus the bounds are set higher. Whenever you change the growth media, you also need to adjust the reaction bounds to ensure that the media can be consumed!
```
# Adjust the lower bounds of uptake/secretion reactions: media components may
# flow toward the cell (lower bound -1000), everything else may only drain
# away (lower bound 0).
mcr = 0  # count of uptake/secretion reactions that touch a media compound
for u in uptake_secretion_reactions:
    # Reset the bounds first so this cell is idempotent when the media is
    # changed and the cell re-run.
    reactions[u].lower_bound = -1000.0
    uptake_secretion_reactions[u].lower_bound = -1000.0
    reactions[u].upper_bound = 1000.0
    uptake_secretion_reactions[u].upper_bound = 1000.0
    # (the original also set an `override` flag here that was never read;
    # it has been removed)
    is_media_component = any(
        c in media for c in uptake_secretion_reactions[u].all_compounds()
    )
    if is_media_component:
        mcr += 1
    else:
        # Not supplied by the media: forbid uptake (influx), allow secretion.
        reactions[u].lower_bound = 0.0
        uptake_secretion_reactions[u].lower_bound = 0.0
    # these are the reactions that allow the media components to flux
    # print(f"{u} {sbml.reactions[u].equation} ({sbml.reactions[u].lower_bound}, {sbml.reactions[u].upper_bound})")
print(f"There are {mcr} reactions (out of {len(uptake_secretion_reactions)}) with a media component")
```
### Run the FBA
Now that we have constructed our model, we can run the FBA!
```
# Wrap the compounds and reactions in a ModelData container, then run the FBA
# with the chosen media and the biomass equation as the objective.
ms = PyFBA.model_seed.ModelData(compounds = filtered_compounds, reactions = reactions)
status, value, growth = PyFBA.fba.run_fba(ms, reactions_to_run, media, biomass_equation,
                                          uptake_secretion_reactions, verbose=True)
print("The FBA completed with a flux value of {} --> growth: {}".format(value, growth))
```
# Export the components of the model
This demonstrates how to export and import the components of this model, so you can do other things with it!
```
if share_data:
    # Export each model component with pickle so other notebooks can load
    # them without re-parsing the SBML.  Context managers ensure every file
    # handle is flushed and closed (the original left them open).
    components = {
        'compounds.pickle': filtered_compounds,
        'reactions.pickle': reactions,
        'reactions_to_run.pickle': reactions_to_run,
        'media.pickle': media,
        'sbml_biomass.pickle': biomass_equation,
        'uptake_secretion_reactions.pickle': uptake_secretion_reactions,
    }
    for fname, obj in components.items():
        with open(fname, 'wb') as fh:
            pickle.dump(obj, fh)
if share_data:
    # Round-trip check: reload everything and re-run the FBA to confirm the
    # pickled artifacts reproduce the same result.
    with open('compounds.pickle', 'rb') as fh:
        sbml_filtered_compounds = pickle.load(fh)
    with open('reactions.pickle', 'rb') as fh:
        sbml_reactions = pickle.load(fh)
    with open('reactions_to_run.pickle', 'rb') as fh:
        sbml_reactions_to_run = pickle.load(fh)
    with open('media.pickle', 'rb') as fh:
        sbml_media = pickle.load(fh)
    with open('sbml_biomass.pickle', 'rb') as fh:
        sbml_biomass_equation = pickle.load(fh)
    with open('uptake_secretion_reactions.pickle', 'rb') as fh:
        sbml_uptake_secretion_reactions = pickle.load(fh)
    ms = PyFBA.model_seed.ModelData(compounds = sbml_filtered_compounds, reactions = sbml_reactions)
    status, value, growth = PyFBA.fba.run_fba(ms, sbml_reactions_to_run, sbml_media, sbml_biomass_equation,
                                              sbml_uptake_secretion_reactions, verbose=True)
    print("The FBA completed with a flux value of {} --> growth: {}".format(value, growth))
```
| github_jupyter |
# JavaScript and HTML Tricks in a Jupyter Notebook
Normally I use [JSFiddle](https://jsfiddle.net/) to mock up JavaScript concepts but at work we use Jupyter Notebooks for design and documentation so it is handy to be able to demonstrate new web client features within a particular notebook.
### Custom CSS
Use `%%html` cell magic, e.g.
```html
%%html
<style>
.clearfix {
overflow: auto;
}
</style>
```
Using Python.
```python
from IPython.core.display import HTML
def _set_css_style(css_file_path):
    """
    Read the custom CSS file and load it into Jupyter.

    Pass the file path to the CSS file; returns an IPython HTML object
    wrapping the styles in a <style> tag.

    NOTE(review): the usage example in this document calls `set_css_style`
    (no leading underscore) -- confirm which name is intended.
    """
    # Use a context manager so the file handle is closed (the original
    # leaked it via open(...).read()).
    with open(css_file_path, "r") as css_file:
        styles = css_file.read()
    return HTML('<style>%s</style>' % styles)
```
Then invoke in the notebook with
```python
set_css_style('css/custom.css')
```
Alter configuration for all notebooks.
```bash
~/.jupyter/custom/custom.css
~/.jupyter/custom/custom.js
```
```
%%javascript
// Demo: inject a stylesheet via jQuery and build rows of floated <div>
// boxes inside this cell's output area (`element` is provided by Jupyter).
var styleSheet = jQuery("<style type='text/css'>" +
    ".clearfix{overflow:auto;} " +
    ".debug-border{border:1px blue dashed;overflow:auto} " +
    ".div-box{background-color:red; color:white; font-weight:bold; margin: 2px; height:25px; width:200px; text-align:center} " +
    ".left{float:left;} " +
    ".light-blue-fill{background-color:lightblue; text-align:center} " +
    ".redbold{color:#f00; font-weight:bold;} " +
    "</style>");
var parentDiv = jQuery("<div id='parent' class='redbold light-blue-fill debug-border'/>").text("Starting out simple...");
// Attaching the <style> node anywhere in the DOM makes its rules global.
styleSheet.appendTo(parentDiv);
element.append(parentDiv);
var boxParent = jQuery("<div class='clearfix' id='1stRow'/>");
boxParent.addClass("clearfix");
element.append(boxParent);
var div = jQuery("<div class='left div-box'/>").text("No need for <p> tags");
div.addClass("left div-box");
div.appendTo(boxParent);
// The style sheet is already in the DOM, so we should be able to refer to it
// without having to specifically add a class to the div, e.g.
// div.addClass("div-box");
div = jQuery("<div class='left cfblue-box div-box'/>").text("Just another box");
jQuery("<style type='text/css'>.cfblue-box{background-color:cornflowerblue;}</style>").appendTo(div);
div.appendTo(boxParent);
boxParent = jQuery("<div class='clearfix' id='2ndRow'/>");
element.append(boxParent);
div = jQuery("<div class='left blue-box div-box'/>").text("Even more");
jQuery("<style type='text/css'>.blue-box{background-color:blue;}</style>").appendTo(div);
div.appendTo(boxParent);
div = jQuery("<div class='left orange-box div-box'/>").text("Yet another box");
jQuery("<style type='text/css'>.orange-box{background-color:orange;}</style>").appendTo(div);
div.appendTo(boxParent);
```
## Open a JSON File
Use the `<input type="file">` type. In this case, we have constrained it to look for `JSON` files.
```
%%html
<!-- File-picker demo: choosing a .json file reads it client-side with
     FileReader and dumps the text into the #jsonarea textarea. -->
<script>
// Small namespace object so the inline onchange handler can call a.handleFiles.
var a = {
    handleFiles: function (files, source) {
        var reader = new FileReader();
        // NOTE(review): the IIFE binds files[0] to its `files` parameter
        // (shadowing the outer FileList) and never uses `source`; only
        // e.target.result is read in the callback.
        reader.onload = (function(files, source) {
            return function(e) {
                $('#jsonarea').val(e.target.result);
            };
        })(files[0]);
        reader.readAsText(files[0]);
    }
};
</script>
<p>There are JSON Files for testing in the json folder.</p>
<div class="load">
    <textarea spellcheck="false" id="jsonarea" placeholder="JSON shown here"></textarea>
    <pre id="theError" class="error"></pre>
    <span class="fileInput"><input type="file" accept=".json" id="jsonFile" onchange="a.handleFiles(this.files, 'load')"></span>
</div>
```
## Output JSON within HTML in a Cell
```
%%javascript
// Render a small HTML fragment plus pretty-printed JSON into the cell output.
// A JSON object to output inline.
var testJson = {
    name: "Hortense",
    title: "Queen consort of Holland"
};
// Abuse an array to build up some HTML.
var html = [];
html.push(
    "<h3>Inline JSON</h3>",
    "See JSON below.",
    "<br/>",
    "<p><b>JSON Output</b>:</p>"
);
// element.html replaces the output area content with the joined markup.
element.html(html.join(""));
// JSON.stringify with indent 2 pretty-prints inside a <pre> block.
var pre = jQuery("<pre id='out'></pre>");
pre.append(JSON.stringify(testJson, null, 2));
element.append(pre);
```
## References
1. [Theming IPython Jupyter Notebooks](http://sherifsoliman.com/2016/01/11/theming-ipython-jupyter-notebook/)
2. [Jupyter goodness](https://martinapugliese.github.io/jupyter-customise/) See the "Customising the Markdown style" section.
3. [Jupyter Notebook Custom CSS](https://gist.github.com/pmlandwehr/6bd26d0aabab5963a34dcaba1d6a18d4)
4. [Customizing the CSS style in the notebook](http://2014.es.pycon.org/static/talks/Hacking%20the%20notebook%20-%20Kiko%20Correoso/07-Styling_the_notebook.slides.html)
5. [IPython customizations](https://gist.github.com/jhamrick/66a76322ab8d1ff0f49f) - I had to include this one as it uses the [Solarized](https://ethanschoonover.com/solarized/) color theme!
6. [Jupyter Themer](https://github.com/transcranial/jupyter-themer)
7. [How do I set custom CSS for my IPython/IHaskell/Jupyter Notebook?](https://stackoverflow.com/questions/32156248/how-do-i-set-custom-css-for-my-ipython-ihaskell-jupyter-notebook)
| github_jupyter |
```
import pandas as pd
import numpy as np
import time
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing as pp
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
import xgboost as xgb
from sklearn.ensemble import BaggingClassifier
import lightgbm as lgb
from sklearn.naive_bayes import GaussianNB
from sklearn import preprocessing as pp
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from statistics import mode
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import xgboost as xgb
import lightgbm as lgb
#Todas las librerías para los distintos algoritmos
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import LinearSVC
from sklearn.svm import OneClassSVM
from sklearn.svm import SVC
from sklearn.svm import NuSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import BaggingClassifier
import statistics
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier
from pylab import rcParams
from collections import Counter
# Load the raw train/test splits; '?' marks missing values in these CSVs.
data_train= pd.read_csv("./datos/train.csv",na_values=["?"])
data_test= pd.read_csv("./datos/test.csv",na_values=["?"])
# Work on copies so the originals stay untouched (test ids are reused later).
data_trainCopia = data_train.copy()
data_testCopia = data_test.copy()
# Fit one LabelEncoder per categorical column using per-column vocabulary
# files, so train and test share a single consistent encoding.
# NOTE(review): no encoder is fitted for 'Tipo_marchas', yet a later cell
# calls Tipo_marchas.transform(...) -- that raises NameError unless the
# encoder is defined elsewhere.  TODO confirm / add the missing encoder.
Nombre = LabelEncoder().fit(pd.read_csv("./datos/nombre.csv").Nombre)
Año = LabelEncoder().fit(pd.read_csv("./datos/ao.csv").Año)
Ciudad = LabelEncoder().fit(pd.read_csv("./datos/ciudad.csv").Ciudad)
Combustible = LabelEncoder().fit(pd.read_csv("./datos/combustible.csv").Combustible)
Consumo = LabelEncoder().fit(pd.read_csv("./datos/consumo.csv").Consumo)
Descuento = LabelEncoder().fit(pd.read_csv("./datos/descuento.csv").Descuento)
Kilometros = LabelEncoder().fit(pd.read_csv("./datos/kilometros.csv").Kilometros)
Mano = LabelEncoder().fit(pd.read_csv("./datos/mano.csv").Mano)
Potencia = LabelEncoder().fit(pd.read_csv("./datos/potencia.csv").Potencia)
Asientos = LabelEncoder().fit(pd.read_csv("./datos/asientos.csv").Asientos)
Motor_CC=LabelEncoder().fit(pd.read_csv("./datos/motor_cc.csv").Motor_CC)
# Impute missing values in every training column with that column's mode
# (statistics.mode over the Series).  The loop replaces twelve copy-pasted
# lines; column order matches the original, and the columns are independent,
# so the result is identical.
for col in ['Nombre', 'Año', 'Ciudad', 'Kilometros', 'Combustible',
            'Tipo_marchas', 'Mano', 'Consumo', 'Motor_CC', 'Potencia',
            'Asientos', 'Descuento']:
    data_trainCopia[col] = data_trainCopia[col].fillna(mode(data_trainCopia[col]))
# Same mode-imputation for the test split (see the training cell above for
# rationale): one loop instead of twelve copy-pasted lines, same column
# order, identical result.
for col in ['Nombre', 'Año', 'Ciudad', 'Kilometros', 'Combustible',
            'Tipo_marchas', 'Mano', 'Consumo', 'Motor_CC', 'Potencia',
            'Asientos', 'Descuento']:
    data_testCopia[col] = data_testCopia[col].fillna(mode(data_testCopia[col]))
# Drop the columns we will not use for modelling from both splits, then
# discard any rows that still contain missing values.
unused_columns = ['Descuento', 'Ciudad', 'Asientos', 'id']
for column in unused_columns:
    data_trainCopia = data_trainCopia.drop([column], axis=1)
    data_testCopia = data_testCopia.drop([column], axis=1)
data_trainCopia = data_trainCopia.dropna()
data_testCopia = data_testCopia.dropna()
# Encode the categorical columns with the LabelEncoders fitted above.
data_trainCopia.Nombre = Nombre.transform(data_trainCopia.Nombre)
data_trainCopia.Año = Año.transform(data_trainCopia.Año)
#data_trainCopia.Ciudad = Ciudad.transform(data_trainCopia.Ciudad)
data_trainCopia.Combustible = Combustible.transform(data_trainCopia.Combustible)
data_trainCopia.Potencia = Potencia.transform(data_trainCopia.Potencia)
data_trainCopia.Consumo = Consumo.transform(data_trainCopia.Consumo)
#data_trainCopia.Descuento = Descuento.transform(data_trainCopia.Descuento)
data_trainCopia.Kilometros = Kilometros.transform(data_trainCopia.Kilometros)
data_trainCopia.Mano = Mano.transform(data_trainCopia.Mano)
data_trainCopia.Motor_CC = Motor_CC.transform(data_trainCopia.Motor_CC)
# NOTE(review): no `Tipo_marchas` LabelEncoder is fitted anywhere in this
# notebook, so the next line raises NameError unless it is defined elsewhere;
# the fit_transform two lines below re-encodes the column anyway -- confirm.
data_trainCopia.Tipo_marchas = Tipo_marchas.transform(data_trainCopia.Tipo_marchas)
#data_trainCopia.Asientos = Asientos.transform(data_trainCopia.Asientos)
data_trainCopia.Tipo_marchas = LabelEncoder().fit_transform(data_trainCopia.Tipo_marchas)
#-------------------------------------------------------------------------------------------
data_testCopia.Nombre = Nombre.transform(data_testCopia.Nombre)
data_testCopia.Año = Año.transform(data_testCopia.Año)
#data_testCopia.Ciudad = Ciudad.transform(data_testCopia.Ciudad)
data_testCopia.Combustible = Combustible.transform(data_testCopia.Combustible)
data_testCopia.Potencia = Potencia.transform(data_testCopia.Potencia)
data_testCopia.Consumo = Consumo.transform(data_testCopia.Consumo)
#data_testCopia.Descuento = Descuento.transform(data_testCopia.Descuento)
data_testCopia.Kilometros = Kilometros.transform(data_testCopia.Kilometros)
data_testCopia.Mano = Mano.transform(data_testCopia.Mano)
data_testCopia.Tipo_marchas = Tipo_marchas.transform(data_testCopia.Tipo_marchas)
#data_testCopia.Asientos = Asientos.transform(data_testCopia.Asientos)
data_testCopia.Motor_CC = Motor_CC.transform(data_testCopia.Motor_CC)
# NOTE(review): fit_transform is applied to train and test *separately*, so
# the integer codes for Tipo_marchas need not agree between the two frames.
data_testCopia.Tipo_marchas = LabelEncoder().fit_transform(data_testCopia.Tipo_marchas)
# Separate the target column from the predictors.
target_train=data_trainCopia['Precio_cat']
data_trainCopia=data_trainCopia.drop(['Precio_cat'], axis=1)
#atributos=data_train[['id','Nombre','Ciudad','Año','Kilometros','Combustible','Tipo_marchas','Mano','Consumo','Motor_CC','Potencia','Asientos','Descuento']]
# Column subset reused below for the cross-validation scores.
atributos=data_trainCopia[['Nombre','Año','Kilometros','Combustible','Tipo_marchas','Mano','Consumo','Motor_CC','Potencia']]
target = pd.read_csv('./datos/precio_cat.csv')
# Correlation heatmap of the encoded predictors.
matriz_correlacion = data_trainCopia.corr()
sns.set(rc={'figure.figsize':(15,10)})
sns.heatmap(matriz_correlacion, annot=True)
plt.show()
def plot(X, y, title="Ejemplo de clases"):
    """Scatter-plot the first two feature columns of X, one series per class.

    X is a 2-D array-like; y is a 1-D array of 0/1 class labels.  Class 0 is
    drawn at alpha 0.8 and class 1 at alpha 0.5, each with an "id #<n>"
    legend label.
    """
    fig, axes = plt.subplots()
    axes.set_title(title)
    for class_label, opacity in ((0, 0.8), (1, 0.5)):
        mask = y == class_label
        axes.scatter(X[mask, 0], X[mask, 1],
                     label=f"id #{class_label}", alpha=opacity)
# Class balancing (imbalanced-learn): RandomUnderSampler trims the majority
# class; SMOTE synthesises new minority-class samples.
from imblearn.under_sampling import RandomUnderSampler
rus = RandomUnderSampler(random_state=40, sampling_strategy='majority', replacement=False)
Xu, yu = rus.fit_resample(data_trainCopia, target_train)
Counter(yu)
# Class distribution after undersampling.
ax=sns.distplot(yu)
from imblearn.over_sampling import SMOTE
Xo, yo = SMOTE().fit_resample(data_trainCopia, target_train)
Counter(yo)
# Class distribution after SMOTE oversampling.
ax=sns.distplot(yo)
# --- LightGBM on the full (unbalanced) training data ------------------------
lgbm = lgb.LGBMClassifier(objective='multiclassova',n_estimators=200,n_jobs=-1)
lgbmEntrenado = lgbm.fit(data_trainCopia, target_train)
preLgb = lgbmEntrenado.predict(Xu)  # NOTE(review): predicted on the undersampled set; never used afterwards
# NOTE(review): cross-validation is scored on `atributos` (a column subset),
# not on the columns the model was fitted with -- confirm this is intended.
scores = cross_val_score(lgbmEntrenado, atributos, target_train, cv=5, scoring='accuracy')
print("Score Validacion Cruzada", np.mean(scores)*100)
# --- XGBoost on the SMOTE-oversampled data ----------------------------------
xgbclf = xgb.XGBClassifier(max_depth=10, n_estimators = 200, n_jobs=-1)
xgbEntrenado = xgbclf.fit(Xo, yo)
prexgb = xgbEntrenado.predict(Xo)  # prediction on the training data itself
scores = cross_val_score(xgbEntrenado, atributos, target_train, cv=5, scoring='accuracy')
print("Score Validacion Cruzada", np.mean(scores)*100)
# --- Bagging of LightGBM, three hyper-parameter variants --------------------
# NOTE(review): objective='regression_l1' is a regression objective passed to
# a *classifier* -- verify LightGBM accepts it here.
lgbm = lgb.LGBMClassifier(objective='regression_l1',n_estimators=200,n_jobs=-1)
bagging = BaggingClassifier(base_estimator=lgbm, bootstrap=False, max_features=0.9)
bagEntrenado = bagging.fit(data_trainCopia, target_train)
preBaging = bagEntrenado.predict(data_testCopia)
scores = cross_val_score(bagEntrenado, atributos, target_train, cv=5, scoring='accuracy')
print("Score Validacion Cruzada", np.mean(scores)*100)
lgbm = lgb.LGBMClassifier(objective='regression_l1',n_estimators=200,n_jobs=-1)
bagging = BaggingClassifier(base_estimator=lgbm, bootstrap=False, max_features=0.99)
bagEntrenado = bagging.fit(data_trainCopia, target_train)
preBaging = bagEntrenado.predict(data_testCopia)
scores = cross_val_score(bagEntrenado, atributos, target_train, cv=5, scoring='accuracy')
print("Score Validacion Cruzada", np.mean(scores)*100)
# The last variant's predictions (preBaging) feed the submission file below.
lgbm = lgb.LGBMClassifier(objective='regression_l1',n_estimators=200,n_jobs=-1)
bagging = BaggingClassifier(base_estimator=lgbm, max_features=0.97, max_samples=0.8)
bagEntrenado = bagging.fit(data_trainCopia, target_train)
preBaging = bagEntrenado.predict(data_testCopia)
scores = cross_val_score(bagEntrenado, atributos, target_train, cv=5, scoring='accuracy')
print("Score Validacion Cruzada", np.mean(scores)*100)
# Build the submission file: test-set ids paired with the bagging predictions.
dfAux = pd.DataFrame({'id':data_test['id']})
dfAux.set_index('id', inplace=True)
# NOTE(review): dfAux is never used after this point.
dfFinal = pd.DataFrame({'id': data_test['id'], 'Precio_cat': preBaging}, columns=['id', 'Precio_cat'])
dfFinal.set_index('id', inplace=True)
dfFinal.to_csv("./soluciones/solucion8281UnderSamplinglgbmasbagging.csv")
```
| github_jupyter |
## MinMaxScaler
```
from pandas import Series
from sklearn.preprocessing import MinMaxScaler
# Demo: rescale a 1-D series into the [0, 1] range with MinMaxScaler.
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
series = Series(data)
print(series)
# Scalers expect a 2-D (n_samples, n_features) array, hence the reshape.
values = series.values
values = values.reshape((len(values), 1))
print(values)
print(values.shape)
scaler = MinMaxScaler(feature_range=(0, 1))
print(scaler)
# fit() learns the data's min/max; transform() applies the scaling.
scaler = scaler.fit(values)
print('Min: %f, Max: %f' % (scaler.data_min_, scaler.data_max_))
normalized = scaler.transform(values)
print(normalized)
# inverse_transform maps the scaled values back to the original units.
inversed = scaler.inverse_transform(normalized)
inversed
```
## StandardScaler
```
from pandas import Series
from sklearn.preprocessing import StandardScaler
from math import sqrt
# Demo: standardise a 1-D series to zero mean / unit variance.
data = [1.0, 5.5, 9.0, 2.6, 8.8, 3.0, 4.1, 7.9, 6.3]
series = Series(data)
print(series)
# Scalers expect a 2-D (n_samples, n_features) array, hence the reshape.
values = series.values
values = values.reshape((len(values), 1))
print(values.shape)
scaler = StandardScaler()
scaler = scaler.fit(values)
# var_ holds the variance; take sqrt for the standard deviation.
print('Mean: %f, StandardDeviation: %f' % (scaler.mean_, sqrt(scaler.var_)))
standardized = scaler.transform(values)
print(standardized)
# inverse_transform recovers the original values.
inversed = scaler.inverse_transform(standardized)
print(inversed)
```
## OneHotEncoder
```
from numpy import array
from numpy import argmax
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
# Demo: string labels -> integer codes -> one-hot vectors -> back again.
data = ['cold', 'cold', 'warm', 'cold', 'hot', 'hot', 'warm', 'cold', 'warm', 'hot']
values = array(data)
values
# integer encode
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(values)
print(integer_encoded)
# onehot encode
# NOTE(review): `sparse=False` was renamed `sparse_output` in scikit-learn
# 1.2 -- confirm the installed version still accepts this keyword.
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
print(onehot_encoded)
# argmax recovers the hot index of the first row; inverse_transform maps the
# integer code back to its original string label.
inverted = label_encoder.inverse_transform([argmax(onehot_encoded[0, :])])
inverted
```
## Sequence Padding
```
import numpy as np
from keras.preprocessing.sequence import pad_sequences
# Demo: pad ragged integer sequences to a common length.
sequences = [
    [1, 2, 3, 4],
    [1, 2, 3],
    [1]
]
sequences
# Default is 'pre' padding: zeros are prepended.
padded = pad_sequences(sequences)
print(padded)
# padding='post' appends the zeros instead.
padded = pad_sequences(sequences, padding='post')
print(padded)
```
## Sequence Truncation
```
from keras.preprocessing.sequence import pad_sequences
# Demo: truncate sequences longer than maxlen (shorter ones are padded).
sequences = [
    [1, 2, 3, 4],
    [1, 2, 3],
    [1]
]
# Default is 'pre' truncation: values are dropped from the start.
truncated = pad_sequences(sequences, maxlen=2)
print(truncated)
# truncating='post' drops values from the end instead.
truncated = pad_sequences(sequences, maxlen=2, truncating='post')
print(truncated)
```
## Sequence vs Supervised Learning
```
from pandas import DataFrame

# Build a toy sequence and its one-step lag, reframing the sequence as a
# supervised-learning table (input t-1 -> output t).
df = DataFrame()
df['t'] = [x for x in range(10)]
# BUG FIX: the lag column was computed with shift(-1), which actually holds
# the *next* value (t+1).  A column named 't-1' must use shift(1) so row t
# carries the previous observation (the first row becomes NaN).
df['t-1'] = df['t'].shift(1)
print(df)
```
| github_jupyter |
## Tools for CSV FIle Processing
### Gather Phase Tools
This importable notebook provides the tooling necessary to handle the processing for the **Gather Phases** in the ETL process for the NOAA HDTA project. This tooling supports Approaches 1 and 2 using **CSV files**.
Each of the process phases require a dictionary to drive the workflow.
```
# Template for the dictionary that drives each processing phase; fill in a
# path/value for every key before calling the phase functions listed below.
project_layout = {
    'Content_Version': '',
    'Daily_Input_Files': '',
    'Raw_Details': '',
    'Missing_Details': '',
    'Station_Summary': '',
    'Station_Details': '',
}
```
Process Phase | Function to run
--- | ---
Phase 1 Approach 1 | noaa_run_phase1_approach1(project_layout)
Phase 2 Approach 1 | noaa_run_phase2_approach1(project_layout)
```
# <help>
# <api>
import os
import time
import glob
import struct
import collections
import pandas as pd
# Create a collection for returning multiple lists of tuples
# (raw = parsed daily readings, missing = days flagged -9999).
approach1_bag = collections.namedtuple('GatherBag', ['raw', 'missing'])
# Historical Raw Daily Detail
# Template dict copied for each parsed (station, day, element) reading.
raw_daily_detail_rec_template = {'StationID': "",
                                 'Year': "",
                                 'Month': "",
                                 'Day': "",
                                 'Type': "",
                                 'FahrenheitTemp': "",
                                 }
# Historical Daily Missing Record Detail
# Same key layout, minus the temperature, for missing observations.
missing_detail_rec_template = {'StationID': "",
                               'Year': "",
                               'Month': "",
                               'Day': "",
                               'Type': "",
                               }
def get_filename(pathname):
    '''Fetch the filename portion of pathname, with its extension stripped.'''
    last_component = pathname.split('/')[-1]
    stem, _extension = os.path.splitext(last_component)
    return stem
def elapsed_time(secs):
    '''Compute a formatted H:MM:SS time stamp given seconds elapsed.'''
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
def noaa_convert_c2f(noaa_temp):
    '''Return the Fahrenheit value of a NOAA temperature reading.

    NOAA stores temperatures as tenths of degrees Celsius; the result is
    rounded to one decimal place.
    '''
    degrees_c = int(noaa_temp) / 10
    degrees_f = 9.0 / 5.0 * degrees_c + 32
    return round(degrees_f, 1)
def noaa_gather_lines(lines):
    '''Return dataframes for raw and missing detail from a list of lines.'''
    raw_records = []
    missing_records = []
    # Parse each daily line and accumulate its raw/missing tuples.
    for line in lines:
        bag = noaa_gather_daily_detail(line)
        raw_records.extend(bag.raw)
        missing_records.extend(bag.missing)
    # Package both accumulations as dataframes in a single named tuple.
    return approach1_bag(pd.DataFrame(raw_records), pd.DataFrame(missing_records))
def noaa_gather_daily_detail(line):
    '''Extract content from daily record, create raw and missing tuples.

    Parses one fixed-width GHCN-Daily line: columns 0-20 hold station id
    (11 chars), year (4), month (2) and element type (4); each following
    8-character group is one day's value (5 chars) plus 3 flag characters.
    Only TMIN/TMAX lines produce output; other element types return empty
    lists.

    NOTE(review): struct.unpack on a str slice and the str comparisons
    below are Python 2 semantics; under Python 3 this would need bytes
    input and b'...' literals — confirm the target interpreter.
    '''
    # Header fields: (station_id, year, month, element_type).
    station_time_element = struct.unpack('11s4s2s4s', line[0:21])
    raw_tuple_list = []
    missing_tuple_list = []
    if station_time_element[3] == 'TMIN' or station_time_element[3] == 'TMAX':
        values = line[21:]
        day_of_month = 0
        # Consume one 8-character (5-char value + 3 flag chars) group per day.
        while(len(values) > 7):
            day_of_month = day_of_month + 1
            # day_measure = (value, mflag, qflag, sflag); flags are unused here.
            day_measure = struct.unpack('5ssss', values[0:8])
            if day_measure[0] != '-9999':
                raw_tuple = dict(raw_daily_detail_rec_template)
                # Compute degrees fahrenheit (value is tenths of degrees C)
                fahrenheit = noaa_convert_c2f(int(day_measure[0]))
                # Construct raw detail record
                raw_tuple['StationID'] = station_time_element[0]
                raw_tuple['Year'] = station_time_element[1]
                raw_tuple['Month'] = station_time_element[2]
                raw_tuple['Day'] = day_of_month
                raw_tuple['Type'] = station_time_element[3]
                raw_tuple['FahrenheitTemp'] = fahrenheit
                raw_tuple_list.append(raw_tuple)
            else:
                # Construct missing detail record (-9999 marks a missing reading)
                missing_tuple = dict(missing_detail_rec_template)
                missing_tuple['StationID'] = station_time_element[0]
                missing_tuple['Year'] = station_time_element[1]
                missing_tuple['Month'] = station_time_element[2]
                missing_tuple['Day'] = day_of_month
                missing_tuple['Type'] = station_time_element[3]
                missing_tuple_list.append(missing_tuple)
            # Adjust offset for next day's 8-character group
            values = values[8:]
    # Return new tuples
    return approach1_bag(raw_tuple_list, missing_tuple_list)
def noaa_process_hcn_daily_file(fname):
    '''Return dataframes for raw and missing detail from lines in file.

    Parses every line of the GHCN daily file at `fname` and aggregates
    the raw and missing tuples into two column-ordered, sorted dataframes,
    returned as approach1_bag(raw, missing).
    '''
    # The original wrote print("...").format(...), which calls .format on
    # print's return value (None) and raises AttributeError on Python 3;
    # keep the formatting inside the print call instead.
    print("Extracting content from file {0}.".format(fname))
    line_count = 0
    raw_cols = ['StationID', 'Year', 'Month', 'Day', 'Type', 'FahrenheitTemp']
    missing_cols = ['StationID', 'Year', 'Month', 'Day', 'Type']
    # Accumulators for the per-line parse results.
    raw_list = []
    missing_list = []
    # Start Timer
    start_time = time.time()
    # The context manager closes the file; the original's explicit
    # f.close() inside the with-block was redundant.  Iterating the file
    # object also avoids materializing all lines via readlines().
    with open(fname, 'r') as f:
        for line in f:
            line_count += 1
            r = noaa_gather_daily_detail(line)
            raw_list += r.raw
            missing_list += r.missing
    seconds = (time.time() - start_time)
    print(">> Processing Complete: {0} lines of file {1}.".format(line_count, fname))
    print(">> Elapsed file execution time {0}".format(elapsed_time(seconds)))
    # Capture and Sort Results in DataFrames.
    # DataFrame.sort() was removed in pandas 0.20; sort_values is the
    # documented replacement.
    df_raw = pd.DataFrame(raw_list)
    df_missing = pd.DataFrame(missing_list)
    r = df_raw.sort_values(raw_cols).reindex(columns=raw_cols)
    m = df_missing.sort_values(missing_cols).reindex(columns=missing_cols)
    return approach1_bag(r, m)
def noaa_run_phase1_approach1(project_layout):
    '''Process corpus of daily files and store results in CSV files.

    project_layout: dict of project folder details; requires
    'Daily_Input_Files' (glob pattern for input files) plus 'Raw_Details'
    and 'Missing_Details' (output folders).  All failures are caught and
    reported; the function always returns None.
    '''
    try:
        if not project_layout['Daily_Input_Files']:
            raise Exception("Incomplete or missing dictionary of project folder details.")
        print(">> Processing Started ...")
        # Start Timer
        start_time = time.time()
        for index, fname in enumerate(glob.glob(project_layout['Daily_Input_Files'])):
            station_name = get_filename(fname)
            # Keep the .format call inside print; the original's
            # print("...").format(...) breaks on Python 3.
            print(">> Processing file {0}: {1}".format(index, station_name))
            raw_file = os.path.join(project_layout['Raw_Details'], station_name + '_raw.csv')
            missing_file = os.path.join(project_layout['Missing_Details'], station_name + '_mis.csv')
            r = noaa_process_hcn_daily_file(fname)
            r.raw.to_csv(raw_file)
            r.missing.to_csv(missing_file)
        seconds = (time.time() - start_time)
        print(">> Processing Complete.")
        print(">> Elapsed corpus execution time {0}".format(elapsed_time(seconds)))
    except Exception as e:
        # e.message is Python 2 only; formatting the exception itself is portable.
        print(">> Processing Failed: Error {0}".format(e))
```
### noaa_run_phase1_approach1
Takes a dictionary of project folder details to drive the processing of *Gather Phase 1 Approach 1* using **CSV files**.
```
# <help:noaa_run_phase1_approach1>
# Example driver: fill in the project folder paths below before running;
# with empty paths the function reports a failure and returns.
project_layout = {
    'Content_Version': '',
    'Daily_Input_Files': '',
    'Raw_Details': '',
    'Missing_Details': '',
    'Station_Summary': '',
    'Station_Details': '',
}

noaa_run_phase1_approach1(project_layout)
# <api>
import os
import glob
import time
import datetime
import collections
import pandas as pd
import traceback

# Create a collection for returning multiple lists of tuples
# (DailySummary = one row per station/month/day; DailyDetail = one row
# per record-breaking observation).
approach2_bag = collections.namedtuple('GatherBag', ['DailySummary', 'DailyDetail'])

# Historical Daily Summary: template dict holding the current record
# values and record-longevity statistics for one (station, month, day).
summary_template = {'StationID': "",
                    'Month': "",
                    'Day': "",
                    'FirstYearOfRecord': "",
                    'TMin': "",
                    'TMinRecordYear': "",
                    'TMax': "",
                    'TMaxRecordYear': "",
                    'CurTMinMaxDelta': "",
                    'CurTMinRecordDur': "",
                    'CurTMaxRecordDur': "",
                    'MaxDurTMinRecord': "",
                    'MinDurTMinRecord': "",
                    'MaxDurTMaxRecord': "",
                    'MinDurTMaxRecord': "",
                    'TMinRecordCount': "",
                    'TMaxRecordCount': ""
                    }
# Column order for the summary CSV output.
summary_cols = ['StationID', 'Month', 'Day', 'FirstYearOfRecord',
                'TMin', 'TMinRecordYear', 'TMax', 'TMaxRecordYear',
                'CurTMinMaxDelta', 'CurTMinRecordDur', 'CurTMaxRecordDur',
                'MaxDurTMinRecord', 'MinDurTMinRecord',
                'MaxDurTMaxRecord', 'MinDurTMaxRecord',
                'TMinRecordCount', 'TMaxRecordCount'
                ]
# Historical Daily Detail: template dict for one record-change event.
detail_template = {'StationID': "",
                   'Year': "",
                   'Month': "",
                   'Day': "",
                   'Type': "",
                   'OldTemp': "",
                   'NewTemp': "",
                   'TDelta': ""
                   }
# Column order for the detail CSV output.
detail_cols = ['StationID', 'Year', 'Month', 'Day', 'Type',
               'NewTemp', 'OldTemp', 'TDelta'
               ]
def get_filename(pathname):
    '''Fetch the filename portion of pathname, with its extension removed.'''
    components = pathname.split('/')
    base, _unused_ext = os.path.splitext(components[-1])
    return base
def elapsed_time(secs):
    '''Render an elapsed-seconds count as an H:MM:SS string.'''
    total_minutes, s = divmod(secs, 60)
    h, m = divmod(total_minutes, 60)
    return "%d:%02d:%02d" % (h, m, s)
def get_key_list(hdf5file, type='raw_detail'):
    '''Return a list of keys for requested type from specified HDF file.

    Opens the HDF5 store read-only, collects the keys whose name contains
    the requested `type` substring, and always closes the store — on the
    error path as well as on success (the original closed-and-reraised;
    try/finally expresses the same guarantee directly).
    '''
    # print("...").format(...) raises AttributeError on Python 3; keep
    # the formatting inside the print call.
    print("Fetching keys for type = {0}".format(type))
    keylist = []
    store = None
    try:
        store = pd.HDFStore(hdf5file, 'r')
        h5keys = store.keys()
    finally:
        if store is not None:
            store.close()
    for k in h5keys:
        if k.find(type) > -1:
            keylist.append(k)
    return keylist
def cleans_invalid_days(df):
    '''Return a dataframe with impossible calendar days removed.

    Drops day 31 for the 30-day months (Apr, Jun, Sep, Nov) and
    Feb 30/31, keeping every other (Month, Day) combination.
    '''
    ShortMths = {4, 6, 9, 11}
    # A row is valid when (month is not a 30-day month OR day != 31)
    # AND (month is not February OR day < 30).  The original used '&'
    # in the first clause, which discarded *every* row from the 30-day
    # months instead of only their day-31 rows.
    df_clean = df.query('(((Month not in @ShortMths) | (Day != 31)) and ((Month != 2) or (Day < 30)))')
    return df_clean
def noaa_process_phase2_records(raw_tuples):
    '''Build per-day summary and record-change detail rows for one station.

    raw_tuples: list of row tuples from one station's raw-detail dataframe,
    shaped (index, StationID, Year, Month, Day, Type, FahrenheitTemp) —
    see the sample below.  Returns approach2_bag(summary_list, detail_list)
    where both members are lists of dicts (summary_template /
    detail_template shapes).
    '''
    # Sample Tuple:
    # (0, 'USC00011084', '1926', '01', 21, 'TMAX', 73.400000000000006)
    #
    # Create several 12x31 matrices to store daily detail per metric of interest.
    # (Allocated 13x32 with one spare row/column; indexed [month-1][day-1].
    # Sentinels: 9999 = "no year seen yet", -99 = "no record yet".)
    fyr_online_for_day = [[9999 for x in range(32)] for x in range(13)]
    tmin_history = [[-99 for x in range(32)] for x in range(13)]
    tmax_history = [[-99 for x in range(32)] for x in range(13)]
    tminyr_history = [[-99 for x in range(32)] for x in range(13)]
    tmaxyr_history = [[-99 for x in range(32)] for x in range(13)]
    tminrc_history = [[0 for x in range(32)] for x in range(13)]
    tmaxrc_history = [[0 for x in range(32)] for x in range(13)]
    tmax_max_life = [[0 for x in range(32)] for x in range(13)]
    tmax_min_life = [[9999 for x in range(32)] for x in range(13)]
    tmin_max_life = [[0 for x in range(32)] for x in range(13)]
    tmin_min_life = [[9999 for x in range(32)] for x in range(13)]
    # Capture Station ID (all raw-tuples are per station)
    station_ID = raw_tuples[0][1]
    # Process each raw daily tuple: create daily retail tuples while updating matrices.
    detail_list = []
    for t in raw_tuples:
        detail_row = dict(detail_template)
        detail_row['StationID'] = t[1]
        detail_row['Year'] = t[2]
        detail_row['Month'] = t[3]
        detail_row['Day'] = str(t[4])
        # Convert 1-based month/day to 0-based matrix indices.
        month = int(t[3])-1
        day = t[4]-1
        # For this day, what was the first year in which this station was operational?
        if fyr_online_for_day[month][day] > int(t[2]):
            fyr_online_for_day[month][day] = int(t[2])
        # Handle TMAX
        if (t[5] == 'TMAX'):
            # Handle TMAX for first record
            if (tmax_history[month][day] == -99):
                # First observation for this day becomes the initial record
                # (TDelta 0, old == new).
                detail_row['Type'] = 'TMAX'
                detail_row['OldTemp'] = round(t[6],1)
                detail_row['NewTemp'] = round(t[6],1)
                detail_row['TDelta'] = 0
                tmax_history[month][day] = round(t[6],1)
                tmaxyr_history[month][day] = int(t[2])
                tmaxrc_history[month][day] = 1
                tmax_min_life[month][day] = 0
                tmax_max_life[month][day] = 0
                # Add new daily detail row
                detail_list.append(detail_row)
            # Handle TMAX for new daily record
            elif (round(t[6],1) > tmax_history[month][day]):
                detail_row['Type'] = 'TMAX'
                detail_row['OldTemp'] = tmax_history[month][day]
                detail_row['NewTemp'] = round(t[6],1)
                detail_row['TDelta'] = round(t[6],1) - tmax_history[month][day]
                # How long the outgoing records had stood, in years.
                current_tmin_duration = int(t[2]) - tminyr_history[month][day]
                current_tmax_duration = int(t[2]) - tmaxyr_history[month][day]
                if tmin_max_life[month][day] == 0:
                    tmin_max_life[month][day] = int(t[2]) - fyr_online_for_day[month][day]
                if tmax_max_life[month][day] == 0:
                    tmax_max_life[month][day] = int(t[2]) - fyr_online_for_day[month][day]
                if current_tmax_duration > tmax_max_life[month][day]:
                    tmax_max_life[month][day] = current_tmax_duration
                # NOTE(review): the next two lines compare a TMIN duration
                # against tmin_max_life but store the TMAX duration — the
                # mixing of tmin/tmax here looks unintended; confirm.
                if current_tmin_duration < tmin_max_life[month][day]:
                    tmin_max_life[month][day] = current_tmax_duration
                tmax_history[month][day] = round(t[6],1)
                tmaxyr_history[month][day] = int(t[2])
                tmaxrc_history[month][day] += 1
                # Add new daily detail row
                detail_list.append(detail_row)
        if (t[5] == 'TMIN'):
            # Handle TMIN for first record
            if (tmin_history[month][day] == -99):
                # First observation for this day becomes the initial record.
                detail_row['Type'] = 'TMIN'
                detail_row['OldTemp'] = round(t[6],1)
                detail_row['NewTemp'] = round(t[6],1)
                detail_row['TDelta'] = 0
                tmin_history[month][day] = round(t[6],1)
                tminyr_history[month][day] = int(t[2])
                tminrc_history[month][day] = 1
                tmin_min_life[month][day] = 0
                tmin_max_life[month][day] = 0
                # Add new daily detail row
                detail_list.append(detail_row)
            # Handle TMIN for new daily record
            elif (round(t[6],1) < tmin_history[month][day]):
                detail_row['Type'] = 'TMIN'
                detail_row['OldTemp'] = tmin_history[month][day]
                detail_row['NewTemp'] = round(t[6],1)
                detail_row['TDelta'] = tmin_history[month][day] - round(t[6],1)
                current_tmin_duration = int(t[2]) - tminyr_history[month][day]
                current_tmax_duration = int(t[2]) - tmaxyr_history[month][day]
                if tmax_min_life[month][day] == 0:
                    tmax_min_life[month][day] = int(t[2]) - fyr_online_for_day[month][day]
                if tmin_min_life[month][day] == 0:
                    tmin_min_life[month][day] = int(t[2]) - fyr_online_for_day[month][day]
                # NOTE(review): compares a TMAX duration against
                # tmax_min_life but stores the TMIN duration — mirrors the
                # suspicious mixing in the TMAX branch above; confirm.
                if current_tmax_duration > tmax_min_life[month][day]:
                    tmax_min_life[month][day] = current_tmin_duration
                if current_tmin_duration < tmin_min_life[month][day]:
                    tmin_min_life[month][day] = current_tmin_duration
                tmin_history[month][day] = round(t[6],1)
                tminyr_history[month][day] = int(t[2])
                tminrc_history[month][day] += 1
                # Add new daily detail row
                detail_list.append(detail_row)
    # Create a daily summary record for each day of the year using our matrices.
    summary_list = []
    now = datetime.datetime.now()
    # xrange: Python 2 only (range on Python 3).
    for mth in xrange(1,13):
        for day in xrange(1,32):
            m = mth-1
            d = day-1
            summary_row = dict(summary_template)
            summary_row['StationID'] = station_ID
            summary_row['Month'] = mth
            summary_row['Day'] = day
            summary_row['FirstYearOfRecord'] = fyr_online_for_day[m][d]
            summary_row['TMin'] = tmin_history[m][d]
            summary_row['TMinRecordYear'] = tminyr_history[m][d]
            summary_row['TMax'] = tmax_history[m][d]
            summary_row['TMaxRecordYear'] = tmaxyr_history[m][d]
            summary_row['CurTMinMaxDelta'] = summary_row['TMax'] - summary_row['TMin']
            summary_row['CurTMinRecordDur'] = int(now.year) - summary_row['TMinRecordYear']
            summary_row['CurTMaxRecordDur'] = int(now.year) - summary_row['TMaxRecordYear']
            # NOTE(review): original author marked the next mapping
            # "Can not explain" — MaxDurTMinRecord is sourced from the
            # tmax_min_life matrix; likely swapped with tmin matrices.
            summary_row['MaxDurTMinRecord'] = tmax_min_life[m][d] # Can not explain
            summary_row['MinDurTMinRecord'] = tmin_min_life[m][d]
            summary_row['MaxDurTMaxRecord'] = tmax_max_life[m][d]
            # NOTE(review): likewise MinDurTMaxRecord reads tmin_max_life.
            summary_row['MinDurTMaxRecord'] = tmin_max_life[m][d] # Can not explain
            summary_row['TMinRecordCount'] = tminrc_history[m][d]
            summary_row['TMaxRecordCount'] = tmaxrc_history[m][d]
            # Add new daily summary row
            summary_list.append(summary_row)
    return approach2_bag(summary_list, detail_list)
def noaa_run_phase2_approach1(project_layout, create_details=False):
    '''Parse raw CSV files to create derived datasets.

    project_layout: dict of project folder details; requires 'Raw_Details'
    (input folder of *_raw.csv files) plus 'Station_Summary' and
    'Station_Details' (output folders).
    create_details: kept for interface compatibility; detail files are
    currently always written.  All failures are caught and reported; the
    function always returns None.
    '''
    summary_list = []
    detail_list = []
    try:
        # Start Key Processing Timer
        start_time = time.time()
        raw_files = os.path.join(project_layout['Raw_Details'], '*_raw.csv')
        for index, fname in enumerate(glob.glob(raw_files)):
            station = get_filename(fname).split('_')[0]
            # Keep the .format call inside print; the original's
            # print("...").format(...) breaks on Python 3.
            print("Processing dataset {0}: {1}".format(index, station))
            summary_file = os.path.join(project_layout['Station_Summary'], station + '_sum.csv')
            detail_file = os.path.join(project_layout['Station_Details'], station + '_std.csv')
            # DataFrame.from_csv was removed from pandas; read_csv with the
            # first column as the index is the documented replacement.
            dataset = pd.read_csv(fname, index_col=0)
            raw_tuples = list(dataset.itertuples())
            r = noaa_process_phase2_records(raw_tuples)
            # DataFrame.sort() was removed in pandas 0.20; use sort_values.
            df_summary = pd.DataFrame(r.DailySummary).sort_values(summary_cols).reindex(columns=summary_cols)
            df_cleaned_summary = cleans_invalid_days(df_summary)
            df_cleaned_summary.to_csv(summary_file)
            df_detail = pd.DataFrame(r.DailyDetail).sort_values(detail_cols).reindex(columns=detail_cols)
            df_cleaned_detail = cleans_invalid_days(df_detail)
            df_cleaned_detail.to_csv(detail_file)
        seconds = (time.time() - start_time)
        print(">> Processing Complete.")
        print(">> Elapsed corpus execution time {0}".format(elapsed_time(seconds)))
    except Exception as e:
        # 'print var' and e.message are Python 2 only; use portable forms.
        print(traceback.format_exc())
        print(">> Processing Failed: Error {0}".format(e))
```
### noaa_run_phase2_approach1
Takes a dictionary of project folder details to drive the processing of *Gather Phase 2 Approach 1* using **CSV files**.
#### Disk Storage Requirements
* This function creates a **Station Summaries** dataset that requires 25MB of free space.
* This function can also create a **Station Details** dataset. If you require this dataset to be generated, modify the call to ```noaa_run_phase2_approach1()``` with ```create_details=True```. You will need additional free space to support this feature. Estimated requirement: <font color="red">**150MB**</font>
```
# <help:noaa_run_phase2_approach1>
# Example driver: fill in the project folder paths below before running;
# with empty paths the function reports a failure and returns.
project_layout = {
    'Content_Version': '',
    'Daily_Input_Files': '',
    'Raw_Details': '',
    'Missing_Details': '',
    'Station_Summary': '',
    'Station_Details': '',
}

noaa_run_phase2_approach1(project_layout)
```
| github_jupyter |
```
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Vertex client library: Local tabular regression model for online prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_local_tabular_regression_online.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_local_tabular_regression_online.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use the Vertex client library for Python to deploy a locally trained custom tabular regression model for online prediction.
### Dataset
The dataset used for this tutorial is the [Boston Housing Prices dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html). The version of the dataset you will use in this tutorial is built into TensorFlow. The trained model predicts the median price of a house in units of 1K USD.
### Objective
In this notebook, you create a custom model locally in the notebook, then learn to deploy the locally trained model to Vertex, and then do a prediction on the deployed model. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console.
The steps performed include:
- Create a model locally.
- Train the model locally.
- View the model evaluation.
- Upload the model as a Vertex `Model` resource.
- Deploy the `Model` resource to a serving `Endpoint` resource.
- Make a prediction.
- Undeploy the `Model` resource.
### Costs
This tutorial uses billable components of Google Cloud (GCP):
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
## Installation
Install the latest version of Vertex client library.
```
# Detect whether this runs inside a Google Cloud Notebook: that
# environment needs pip's --user flag for package installs.
import os
import sys

# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""

# Install/upgrade the Vertex AI client library (IPython shell escape).
! pip3 install -U google-cloud-aiplatform $USER_FLAG
```
Install the latest GA version of *google-cloud-storage* library as well.
```
! pip3 install -U google-cloud-storage $USER_FLAG
```
### Restart the kernel
Once you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
```
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython

    app = IPython.Application.instance()
    # do_shutdown(True) requests a kernel *restart* so the freshly
    # installed packages become importable.
    app.kernel.do_shutdown(True)
```
## Before you begin
### GPU runtime
*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
```
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}

# Fall back to the gcloud-configured project when no ID was entered above.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud (IPython shell capture)
    shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]

print("Project ID:", PROJECT_ID)

! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations)
```
# Vertex AI region used for all resource operations in this notebook.
REGION = "us-central1"  # @param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
```
from datetime import datetime

# Timestamp suffix used to keep resource names unique across sessions.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
```
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.

# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth

        google_auth.authenticate_user()

    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.  (%env sets the environment variable in this kernel.)
    elif not os.getenv("IS_TESTING"):
        %env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you submit a custom training job using the Vertex client library, you upload a Python package
containing your training code to a Cloud Storage bucket. Vertex runs
the code from this package. In this tutorial, Vertex also saves the
trained model that results from your job in the same bucket. You can then
create an `Endpoint` resource based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}

# Default to a project-scoped, timestamped bucket name when none was given.
# NOTE(review): PROJECT_ID is concatenated directly with "aip-" (no
# separator), producing e.g. gs://myprojectaip-2021... — confirm intended.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
# Create the Cloud Storage bucket in the chosen region (shell escape).
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
# Validate access by listing the bucket contents (shell escape).
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
#### Import Vertex client library
Import the Vertex client library into our Python environment.
```
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
```
#### Vertex constants
Setup up the following constants for Vertex:
- `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.
- `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.
```
# API service endpoint (regional Vertex AI endpoint host).
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)

# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
```
#### Hardware Accelerators
Set the hardware accelerators (e.g., GPU), if any, for prediction.
Set the variable `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
For GPU, available accelerators include:
- aip.AcceleratorType.NVIDIA_TESLA_K80
- aip.AcceleratorType.NVIDIA_TESLA_P100
- aip.AcceleratorType.NVIDIA_TESLA_P4
- aip.AcceleratorType.NVIDIA_TESLA_T4
- aip.AcceleratorType.NVIDIA_TESLA_V100
Otherwise specify `(None, None)` to use a container image to run on a CPU.
```
# NOTE(review): the env var is spelled "IS_TESTING_DEPOLY_GPU" (sic);
# kept as-is because an external test harness may set exactly that name.
if os.getenv("IS_TESTING_DEPOLY_GPU"):
    DEPLOY_GPU, DEPLOY_NGPU = (
        aip.AcceleratorType.NVIDIA_TESLA_K80,
        int(os.getenv("IS_TESTING_DEPOLY_GPU")),
    )
else:
    # Default: one NVIDIA Tesla K80 for the deployed model's VM.
    DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
```
#### Container (Docker) image
Next, we will set the Docker container images for prediction
- Set the variable `TF` to the TensorFlow version of the container image. For example, `2-1` would be version 2.1, and `1-15` would be version 1.15. The following list shows some of the pre-built images available:
- TensorFlow 1.15
- `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest`
- `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest`
- TensorFlow 2.1
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest`
- TensorFlow 2.2
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest`
- TensorFlow 2.3
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest`
- XGBoost
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest`
- Scikit-learn
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest`
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest`
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest`
For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
```
# TensorFlow version tag for the serving container (e.g. "2-1" -> TF 2.1).
if os.getenv("IS_TESTING_TF"):
    TF = os.getenv("IS_TESTING_TF")
else:
    TF = "2-1"

# Choose the TF1 vs TF2 and GPU vs CPU prediction image variant.
if TF[0] == "2":
    if DEPLOY_GPU:
        DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
    else:
        DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
    if DEPLOY_GPU:
        DEPLOY_VERSION = "tf-gpu.{}".format(TF)
    else:
        DEPLOY_VERSION = "tf-cpu.{}".format(TF)

DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)

print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU)
```
#### Machine Type
Next, set the machine type to use for prediction.
- Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VM you will use for prediction.
- `machine type`
- `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU
- `n1-highcpu`: 0.9 GB of memory per vCPU
- `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*
```
# Machine type for the prediction VM (family + vCPU count).
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
    MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
    MACHINE_TYPE = "n1-standard"

VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
```
# Tutorial
Now you are ready to start locally training a custom model Boston Housing, and then deploy the model to the cloud.
## Set up clients
The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.
You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.
- Model Service for `Model` resources.
- Endpoint Service for deployment.
- Prediction Service for serving.
```
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}


def create_model_client():
    # Client for Model resources (upload, list, delete).
    client = aip.ModelServiceClient(client_options=client_options)
    return client


def create_endpoint_client():
    # Client for Endpoint resources (create, deploy, undeploy).
    client = aip.EndpointServiceClient(client_options=client_options)
    return client


def create_prediction_client():
    # Client for online prediction requests against a deployed endpoint.
    client = aip.PredictionServiceClient(client_options=client_options)
    return client


# Instantiate one client of each kind up front.
clients = {}
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()

for client in clients.items():
    print(client)
```
## Train a model locally
In this tutorial, you train a Boston Housing model locally.
### Set location to store trained model
You set the variable `MODEL_DIR` for where in your Cloud Storage bucket to save the model in TensorFlow SavedModel format.
Also, you create a local folder for the training script.
```
# Cloud Storage location where the trained SavedModel will be written.
MODEL_DIR = BUCKET_NAME + "/boston"

model_path_to_deploy = MODEL_DIR

# Fresh local folder for the training package (shell escapes).
! rm -rf custom
! mkdir custom
! mkdir custom/trainer
```
#### Task.py contents
In the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary:
- Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.
- Loads Boston Housing dataset from TF.Keras builtin datasets
- Builds a simple deep neural network model using TF.Keras model API.
- Compiles the model (`compile()`).
- Sets a training distribution strategy according to the argument `args.distribute`.
- Trains the model (`fit()`) with epochs specified by `args.epochs`.
- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
- Saves the maximum value for each feature `f.write(str(params))` to the specified parameters file.
```
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.001, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=100, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
default='/tmp/param.txt', type=str,
help='Output file for parameters')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
# Scaling Boston Housing data features
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float)
return feature, max
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
params = []
for _ in range(13):
x_train[_], max = scale(x_train[_])
x_test[_], _ = scale(x_test[_])
params.append(max)
# store the normalization (max) value for each feature
with tf.io.gfile.GFile(args.param_file, 'w') as f:
f.write(str(params))
return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])
model.compile(
loss='mse',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
return model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_dnn_model()
# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
```
### Train the model
```
! python custom/trainer/task.py --epochs=10 --model-dir=$MODEL_DIR
```
## Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.
```
# TensorFlow is needed to deserialize the SavedModel from Cloud Storage.
import tensorflow as tf
# MODEL_DIR is the Cloud Storage path the training job saved the model to.
model = tf.keras.models.load_model(MODEL_DIR)
```
## Evaluate the model
Now let's find out how good the model is.
### Load evaluation data
You will load the Boston Housing test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the feature data, and the corresponding labels (median value of owner-occupied home).
You don't need the training data, and hence why we loaded it as `(_, _)`.
Before you can run the data through evaluation, you need to preprocess it:
x_test:
1. Normalize (rescaling) the data in each column by dividing each value by the maximum value of that column. This will replace each single value with a 32-bit floating point number between 0 and 1.
```
import numpy as np
from tensorflow.keras.datasets import boston_housing

(_, _), (x_test, y_test) = boston_housing.load_data(
    path="boston_housing.npz", test_split=0.2, seed=113
)

def scale(feature):
    """Rescale a 1-D feature array to [0, 1] by its own maximum."""
    feature_max = np.max(feature)
    return (feature / feature_max).astype(np.float32)

# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()

# NOTE(review): this rescales by the *test* column maxima; for consistency
# with training, the maxima saved in args.param_file should probably be
# used instead -- confirm against the training script.
for col in range(13):
    # Normalize feature *columns*; plain x_test[col] would rescale the
    # first 13 rows instead of the 13 features.
    x_test[:, col] = scale(x_test[:, col])
x_test = x_test.astype(np.float32)

print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
```
### Perform the model evaluation
Now evaluate how well the model in the custom job did.
```
model.evaluate(x_test, y_test)
```
## Upload the model for serving
Next, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model.
### How does the serving function work
When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.
The serving function consists of two parts:
- `preprocessing function`:
- Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph).
- Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.
- `post-processing function`:
- Converts the model output to the format expected by the receiving application -- e.g., compresses the output.
- Packages the output for the receiving application -- e.g., add headings, make a JSON object, etc.
Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.
One consideration you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported.
## Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
```
# Reload the exported SavedModel and pull out the name of the first input
# tensor of its default serving signature; this name is what prediction
# requests must be keyed on.
loaded = tf.saved_model.load(model_path_to_deploy)
signature = loaded.signatures["serving_default"]
serving_input = next(iter(signature.structured_input_signature[1]))
print("Serving function input:", serving_input)
```
### Upload the model
Use this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.
The helper function takes the following parameters:
- `display_name`: A human readable name for the `Endpoint` service.
- `image_uri`: The container image for the model deployment.
- `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.
The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:
- `parent`: The Vertex location root path for `Dataset`, `Model` and `Endpoint` resources.
- `model`: The specification for the Vertex `Model` resource instance.
Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields:
- `display_name`: A human readable name for the `Model` resource.
- `metadata_schema_uri`: Since your model was built without an Vertex `Dataset` resource, you will leave this blank (`''`).
- `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format.
- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
Uploading a model into a Vertex Model resource returns a long running operation, since it may take a few moments. You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready.
The helper function returns the Vertex fully qualified identifier for the corresponding Vertex Model instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id.
```
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
    """Upload a SavedModel to the Vertex Model service.

    Args:
        display_name: Human readable name for the Model resource.
        image_uri: Serving container image for the deployment.
        model_uri: Cloud Storage path of the SavedModel artifacts.

    Returns:
        The fully qualified identifier of the created Model resource.
    """
    container_spec = {
        "image_uri": image_uri,
        "command": [],
        "args": [],
        "env": [{"name": "env_name", "value": "env_value"}],
        "ports": [{"container_port": 8080}],
        "predict_route": "",
        "health_route": "",
    }
    model_spec = {
        "display_name": display_name,
        # No managed Dataset was used, so the schema is left blank.
        "metadata_schema_uri": "",
        "artifact_uri": model_uri,
        "container_spec": container_spec,
    }
    # upload_model returns a long-running operation; block until it is done.
    lro = clients["model"].upload_model(parent=PARENT, model=model_spec)
    print("Long running operation:", lro.operation.name)
    upload_model_response = lro.result(timeout=180)
    print("upload_model_response")
    print(" model:", upload_model_response.model)
    return upload_model_response.model
model_to_deploy_id = upload_model(
    "boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
```
### Get `Model` resource information
Now let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:
- `name`: The Vertex unique identifier for the `Model` resource.
This helper function calls the Vertex `Model` client service's method `get_model`, with the following parameter:
- `name`: The Vertex unique identifier for the `Model` resource.
```
def get_model(name):
    """Fetch, print, and return the Model resource identified by `name`.

    Args:
        name: The Vertex fully qualified identifier of the Model resource.

    Returns:
        The Model resource proto (previously discarded; returning it is
        backward compatible and lets callers inspect the resource).
    """
    response = clients["model"].get_model(name=name)
    print(response)
    return response
get_model(model_to_deploy_id)
```
## Deploy the `Model` resource
Now deploy the trained Vertex custom `Model` resource. This requires two steps:
1. Create an `Endpoint` resource for deploying the `Model` resource to.
2. Deploy the `Model` resource to the `Endpoint` resource.
### Create an `Endpoint` resource
Use this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter:
- `display_name`: A human readable name for the `Endpoint` resource.
The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter:
- `display_name`: A human readable name for the `Endpoint` resource.
Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the `Endpoint` resource: `response.name`.
```
ENDPOINT_NAME = "boston_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
    """Create a Vertex Endpoint resource and return it once provisioned.

    Args:
        display_name: Human readable name for the Endpoint resource.

    Returns:
        The Endpoint resource proto (its `.name` is the fully qualified id).
    """
    # create_endpoint returns a long-running operation; wait for completion.
    lro = clients["endpoint"].create_endpoint(
        parent=PARENT, endpoint={"display_name": display_name}
    )
    print("Long running operation:", lro.operation.name)
    endpoint = lro.result(timeout=300)
    print("result")
    for field in ("name", "display_name", "description",
                  "labels", "create_time", "update_time"):
        print(" {}:".format(field), getattr(endpoint, field))
    return endpoint
result = create_endpoint(ENDPOINT_NAME)
```
Now get the unique identifier for the `Endpoint` resource you created.
```
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
```
### Compute instance scaling
You have several choices on scaling the compute instances for handling your online prediction requests:
- Single Instance: The online prediction requests are processed on a single compute instance.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.
- Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.
- Auto Scaling: The online prediction requests are split across a scaleable number of compute instances.
- Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.
The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.
```
# Single-instance serving: provision exactly one compute node and never
# autoscale beyond it (these feed min/max_replica_count in the deploy request).
MIN_NODES = 1
MAX_NODES = 1
```
### Deploy `Model` resource to the `Endpoint` resource
Use this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters:
- `model`: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline.
- `deploy_model_display_name`: A human readable name for the deployed model.
- `endpoint`: The Vertex fully qualified endpoint identifier to deploy the model to.
The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters:
- `endpoint`: The Vertex fully qualified `Endpoint` resource identifier to deploy the `Model` resource to.
- `deployed_model`: The requirements specification for deploying the model.
- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs.
- If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic.
- If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model to the deployed endpoint. The percents must add up to 100.
Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields:
- `model`: The Vertex fully qualified model identifier of the (upload) model to deploy.
- `display_name`: A human readable name for the deployed model.
- `disable_container_logging`: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.
- `dedicated_resources`: This refers to how many compute instances (replicas) that are scaled for serving prediction requests.
- `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
- `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.
- `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`.
#### Traffic Split
Let's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain, you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance.
Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only gets, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision.
#### Response
The method returns a long running operation `response`. We will wait synchronously for the operation to complete by calling the `response.result()`, which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.
```
DEPLOYED_NAME = "boston_deployed-" + TIMESTAMP
def deploy_model(
    model, deployed_model_display_name, endpoint, traffic_split=None
):
    """Deploy an uploaded Model resource to an Endpoint resource.

    Args:
        model: Vertex fully qualified identifier of the uploaded model.
        deployed_model_display_name: Human readable name for the deployment.
        endpoint: Vertex fully qualified identifier of the target endpoint.
        traffic_split: Percent of endpoint traffic per deployed model;
            defaults to {"0": 100}, i.e. all traffic goes to this model.

    Returns:
        The deployment id assigned by the endpoint service.
    """
    # Use a None sentinel instead of a mutable default argument, which
    # would be shared across calls.
    if traffic_split is None:
        traffic_split = {"0": 100}
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    deployed_model = {
        "model": model,
        "display_name": deployed_model_display_name,
        "dedicated_resources": {
            "min_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
            "machine_spec": machine_spec,
        },
        # Keep container logging enabled while debugging the deployment.
        "disable_container_logging": False,
    }
    response = clients["endpoint"].deploy_model(
        endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
    )
    # deploy_model returns a long-running operation; block until deployed.
    print("Long running operation:", response.operation.name)
    result = response.result()
    print("result")
    deployed_model = result.deployed_model
    print(" deployed_model")
    print(" id:", deployed_model.id)
    print(" model:", deployed_model.model)
    print(" display_name:", deployed_model.display_name)
    print(" create_time:", deployed_model.create_time)
    return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
```
## Make an online prediction request
Now do an online prediction with your deployed model.
### Get test item
You will use an example out of the test (holdout) portion of the dataset as a test item.
```
# Take the first example (features and label) from the scaled holdout split.
test_item = x_test[0]
test_label = y_test[0]
# (13,) -- one value per Boston Housing feature
print(test_item.shape)
```
### Send the prediction request
Ok, now you have a test data item. Use this helper function `predict_data`, which takes the parameters:
- `data`: The test data item as a numpy 1D array of floating point values.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.
- `parameters_dict`: Additional parameters for serving.
This function uses the prediction client service and calls the `predict` method with the parameters:
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.
- `instances`: A list of instances (data items) to predict.
- `parameters`: Additional parameters for serving.
To pass the test data to the prediction service, you package it for transmission to the serving binary as follows:
1. Convert the data item from a 1D numpy array to a 1D Python list.
2. Convert the prediction request to a serialized Google protobuf (`json_format.ParseDict()`)
Each instance in the prediction request is a dictionary entry of the form:
{input_name: content}
- `input_name`: the name of the input layer of the underlying model.
- `content`: The data item as a 1D Python list.
Since the `predict()` service can take multiple data items (instances), you will send your single data item as a list of one data item. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service.
The `response` object returns a list, where each element in the list corresponds to the corresponding image in the request. You will see in the output for each prediction:
- `predictions` -- the predicted median value of a house in units of 1K USD.
```
def predict_data(data, endpoint, parameters_dict):
    """Send a single online prediction request and print the response.

    Args:
        data: Test data item as a 1-D numpy array of floats.
        endpoint: Vertex fully qualified identifier of the Endpoint resource.
        parameters_dict: Additional serving parameters (may be None).
    """
    parameters = json_format.ParseDict(parameters_dict, Value())
    # Each instance must conform to the deployed model's prediction input
    # schema, keyed by the serving function's input layer name.
    payload = {serving_input: data.tolist()}
    instances = [json_format.ParseDict(payload, Value())]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    print("predictions")
    for prediction in response.predictions:
        print(" prediction:", prediction)
predict_data(test_item, endpoint_id, None)
```
## Undeploy the `Model` resource
Now undeploy your `Model` resource from the serving `Endpoint` resource. Use this helper function `undeploy_model`, which takes the following parameters:
- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed to.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` is deployed to.
This function calls the endpoint client service's method `undeploy_model`, with the following parameters:
- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.
- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed.
- `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource.
Since this is the only deployed model on the `Endpoint` resource, you simply can leave `traffic_split` empty by setting it to {}.
```
def undeploy_model(deployed_model_id, endpoint):
    """Undeploy a deployed model from its serving Endpoint resource.

    Args:
        deployed_model_id: Deployment id returned when the model was deployed.
        endpoint: Vertex fully qualified identifier of the Endpoint resource.
    """
    # traffic_split is {} because this was the only model on the endpoint.
    result = clients["endpoint"].undeploy_model(
        endpoint=endpoint,
        deployed_model_id=deployed_model_id,
        traffic_split={},
    )
    print(result)
undeploy_model(deployed_model_id, endpoint_id)
```
# Cleaning up
To clean up all GCP resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
```
| github_jupyter |
# Getting Started with CLX and Streamz
This is a guide on how [CLX](https://github.com/rapidsai/clx) and [Streamz](https://streamz.readthedocs.io/en/latest/) can be used to build a streaming inference pipeline.
Streamz has the ability to read from [Kafka](https://kafka.apache.org/) directly into [Dask](https://dask.org/) allowing for computation on a multi-core or cluster environment. This approach is best used for instances in which you hope to increase processing speeds with streaming data.
A selection of workflows such as cyBERT and DGA detection inferencing are implemented in CLX streamz. Here we share an example in which we demonstrate how to read Apache log data from Kafka, perform log parsing using CLX cyBERT and publish result data back to Kafka. Similarly, also for DGA detection.
## Build Quickstart Docker Image
For convenience, you can build a Docker image that will include a working environment that's ready for running your pipeline. This image will contain all needed components including [Kafka](https://kafka.apache.org/) and [Zookeeper](https://zookeeper.apache.org/).
Prerequisites:
* NVIDIA Pascal™ GPU architecture or better
* CUDA 10.1+ compatible NVIDIA driver
* Ubuntu 16.04/18.04 or CentOS 7
* Docker CE v18+
* nvidia-docker v2+
Run the following to build the image:
`
docker build -f examples/streamz/Dockerfile -t clx-streamz:latest .
`
## Create Docker Container
A Docker container is created using the image above. The 'docker run' format to build your container is shown below. Note: volume binding to the container is an optional argument.
**Preferred - Docker CE v19+ and nvidia-container-toolkit**
```
docker run -it \
-p 9787:8787 \
-p 9888:8888 \
-v <your_volume_binding_host_directory_path>:<your_volume_binding_container_directory_path> \
--gpus '"device=0,1,2"' \
--name clx_streamz \
-d clx-streamz:latest
```
**Legacy - Docker CE v18 and nvidia-docker2**
```
docker run -it \
-p 9787:8787 \
-p 9888:8888 \
-v <your_volume_binding_host_directory_path>:<your_volume_binding_container_directory_path> \
--runtime=nvidia \
--name clx_streamz \
-d clx-streamz:latest
```
The Dockerfile contains an ENTRYPOINT which calls [entrypoint.sh](https://github.com/rapidsai/clx/blob/branch-0.17/examples/streamz/scripts/entrypoint.sh) to:
1. Configure and install Kafka
2. Run Kafka broker on `localhost:9092` and Zookeeper on `localhost:2181`
3. Creates (cyBERT and DGA detection) specific input and output kafka topics and publishes sample input data
Your Quickstart Docker container includes the data and models required to run cyBERT and DGA detection stream processing workflows.
## Run cyBERT Streamz Example on Apache Logs
```
docker exec clx_streamz bash -c 'source activate rapids \
&& python $CLX_STREAMZ_HOME/python/cybert.py \
--conf $CLX_STREAMZ_HOME/resources/cybert.yaml \
--model $CLX_STREAMZ_HOME/ml/models/cybert/pytorch_model.bin \
--label_map $CLX_STREAMZ_HOME/ml/models/cybert/config.json \
--poll_interval 1s \
--max_batch_size 500'
```
## Run DGA Streamz Example on Sample Domains
```
docker exec clx_streamz bash -c 'source activate rapids \
&& python $CLX_STREAMZ_HOME/python/dga_detection.py \
--conf $CLX_STREAMZ_HOME/resources/dga_detection.yaml \
--model $CLX_STREAMZ_HOME/ml/models/dga/pytorch_model.bin \
--poll_interval 1s \
--max_batch_size 500'
```
Processed data will be pushed to the given kafka output topic. To view all processed output run:
```
docker exec clx_streamz bash -c 'source activate rapids \
&& $KAFKA_HOME/bin/kafka-console-consumer.sh \
--bootstrap-server <broker> \
--topic <output_topic> \
--from-beginning'
```
View the data processing activity on the dask dashboard by visiting http://localhost:9787 or `<host>:9787`
## Capturing Benchmarks
To capture benchmarks add the benchmark flag along with average log size (kb), for throughput (mb/s) and average batch size (mb) estimates, to the docker run command above. In this case, we are benchmarking the cyBERT workflow with the commands below. Similarly, we can also do it for the DGA detection workflow.
```
docker exec clx_streamz bash -c 'source activate rapids \
&& python $CLX_STREAMZ_HOME/python/cybert.py \
--conf $CLX_STREAMZ_HOME/resources/cybert.yaml \
--model $CLX_STREAMZ_HOME/ml/models/cybert/pytorch_model.bin \
--label_map $CLX_STREAMZ_HOME/ml/models/cybert/config.json \
--poll_interval 1s \
--max_batch_size 500 \
--benchmark 20' \
> cybert_workflow.log 2>&1 &
```
To print benchmark, send a SIGINT signal to the running cybert process.
```
# To get the PID
$ docker exec clx_streamz ps aux | grep "cybert\.py" | awk '{print $2}'
# Kill process
$ docker exec clx_streamz kill -SIGINT <pid>
$ less cybert_workflow.log
```
## Steps to Run Workflow with Custom Arguments
1. Create kafka topics for the clx_streamz workflows that you want to run and publish input data.
```
docker exec clx_streamz /bin/bash -c 'source activate rapids \
&& $CLX_STREAMZ_HOME/scripts/kafka_topic_setup.sh \
-b localhost:9092 \
-i <input_topic> \
-o <output_topic> \
-d <data_filepath>'
```
2. Start workflow
```
docker exec clx_streamz bash -c 'source activate rapids \
&& python $CLX_STREAMZ_HOME/python/<workflow_script> \
--conf <configuration filepath> \
--model <model filepath> \
--label_map <labels filepath> \
--poll_interval <poll_interval> \
--max_batch_size <max_batch_size> \
--benchmark <avg log size>'
```
**Parameters:**
- `conf` - The path to specify source and sink configuration properties.
- `model_file` - The path to your model file
- `label_file` - The path to your label file
- `poll_interval`* - Interval (in seconds) to poll the Kafka input topic for data (Ex: 60s)
- `max_batch_size`* - Max batch size of data (max number of logs) to ingest into streamz with each `poll_interval`
- `benchmark` - To capture benchmarks add the benchmark flag along with average log size (kb), for throughput (mb/s) and average batch size (mb) estimates.
``*`` = More information on these parameters can be found in the streamz [documentation](https://streamz.readthedocs.io/en/latest/api.html#streamz.from_kafka_batched).
**Configuration File Properties**
- `cudf_engine` - This value determines whether to use cudf engine while consuming messages using streamz API
- `kafka_conf`
- `input_topic` - Consumer Kafka topic name
- `output_topic` - Publisher Kafka topic name
- `n_partitions` - Number of partitions in the consumer Kafka topic
- `producer_conf` - User can specify any valid Kafka producer configuration within this block
- `bootstrap.servers` - Kafka brokers Ex: localhost:9092, localhost2:9092
- `session.timeout.ms` - Message publishing timeout
- `queue.buffering.max.messages` - Max number of messages that can hold in the queue
- `...`
- `consumer_conf` - User can specify any valid Kafka consumer configuration within this block
- `bootstrap.servers` - Kafka brokers Ex: localhost:9092, localhost2:9092
- `group.id` - Kafka consumer group id
- `session.timeout.ms` - Message consuming timeout
- `...`
- `elasticsearch_conf` - Elasticsearch sink configuration
- `url` - Elasticsearch service url
- `port` - Elasticsearch service port
- `cafile` - Path to pem file
- `username` - Username
- `password` - Password
- `index` - Name to index the documents
- `sink` - Sink to write processed data Ex: "kafka" or "elasticsearch" or "filesystem"
**Note: Below properties are used only when sink is set to "filesystem"**
- `col_delimiter` - Column delimiter Ex: ","
- `file_extension` - File extension Ex: ".csv"
- `output_dir` - Output filepath
| github_jupyter |
# About OCR approach1:
Through the ocr1.py script we aim to train a small Convolutional Neural Network (CNN) with the data we generated using random_string_data_gen.py. The network should be able to recognize the random string in a given image and provide it as output.
In the first phase we will test it using the generated data itself, whereas later we will try to crop an image from our screen that meets the dataset constraints and see how well the network works.
Link to blog: https://medium.com/@vijendra1125/ocr-part-2-ocr-using-cnn-f43f0cee8016
## Load Libraries
```
import os
from datetime import datetime as dt
import string
import sys
import numpy as np
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from ocr1_functions import *

# set numpy to print arrays without truncation
# (fixed: sys.maxsize was used below without `import sys` -- NameError)
np.set_printoptions(threshold=sys.maxsize)

# ANSI escape codes for bold console output
bold = '\033[1m'
end = '\033[0m'
```
### Parameters
```
## paths ##
# path to folder where the data (tfrecord) files are stored
folder_path = "../data"
# path to load checkpoint
checkpoint_restore = "../cp/ocr1_3to8"
# path to save checkpoint
checkpoint_save = "../cp/ocr1_3to8"

## data related params ##
# collect train and test tfrecord file paths by filename substring
filenames = os.listdir(folder_path)
train_file_paths = []
test_file_paths = []
for filename in filenames:
    if "train" in filename:
        train_file_paths.append(os.path.join(folder_path, filename))
    elif "test" in filename:
        test_file_paths.append(os.path.join(folder_path, filename))
# total number of examples in each train tfrecord
# (fixed: this assignment was commented out, but data_per_train_file is used
# later to compute train_data_count, which raised a NameError)
data_per_train_file = 8192
# total number of examples in each test tfrecord
# (fixed: the line above this assignment was missing its leading '#',
# which made the cell a SyntaxError)
data_per_test_file = 2048
# image size (height, width, channels)
image_size = [32, 256, 1]
# total number of classes (letters + digits + space)
class_count = 63
# string length (including whitespace)
string_length = 16

## training setup related params ##
# restore from given checkpoint
restore = False
# dropout keep-probability for each layer (1 means no drop)
dropout = [1, 1, 1, 1]
# weight decay
wd = 0.000
# learning rate
lr = 0.01
# batch size
# batch_size = 32
batch_size = 1
# total number of epochs
epochs = 5
# after every x epochs decrease learning rate by factor of y (var_lr = [x, y])
var_lr = [None, None]
# parameters related to reading tfrecord input queues
num_of_threads = 16
min_after_dequeue = 5000
capacity = min_after_dequeue + (num_of_threads + 1) * batch_size
```
### Train
```
# data count
train_data_count = data_per_train_file * len(train_file_paths)
test_data_count = data_per_test_file * len(test_file_paths)
# steps per epoch
train_step = train_data_count // batch_size
test_step = test_data_count // batch_size
# build graph
with tf.Graph().as_default():
    # train graph
    x_train, y_train = minibatch(batch_size, train_file_paths, image_size, string_length, class_count)
    logit_train = inference(x_train, class_count, dropout=dropout, wd=wd)
    cost = multi_loss(logit_train, y_train, batch_size, string_length)
    update = parameter_update(cost, lr)
    accuracy_train = accuracy_calc(logit_train, y_train)
    # test graph (no dropout / weight decay)
    x_test, y_test = minibatch(batch_size, test_file_paths, image_size, string_length, class_count)
    logit_test = inference(x_test, class_count)
    accuracy_test = accuracy_calc(logit_test, y_test)
    saver = tf.train.Saver()
    # start session
    with tf.Session() as sess:
        # initialize the variables
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        # restore the variables
        if restore == True:
            loader = tf.train.import_meta_graph(checkpoint_restore + '.meta')
            loader.restore(sess, checkpoint_restore)
        # train for given number of epochs
        for e in range(epochs):
            print(bold + "\nepoch:" + end, e)
            train_epoch_cost = 0
            train_epoch_acc = 0
            test_epoch_acc = 0
            # train for given number of steps in one epoch
            for s in range(train_step):
                _, train_batch_cost = sess.run([update, cost])
                # crude progress indicator: '~' at the halfway point
                if s % (train_step // 2) == 0 and s != 0:
                    print('~', end='')
                elif s == train_step - 1:
                    print('')
                train_epoch_cost += train_batch_cost / train_step
            print(bold + "epoch_cost: " + end, train_epoch_cost)
            # estimate accuracy on the training set over a fifth of the epoch
            # (fixed: the accumulator was overwritten by each sess.run result
            # and then divided by the wrong step count)
            acc_steps = train_step // 5
            for i in range(acc_steps):
                train_batch_acc = sess.run(accuracy_train)
                train_epoch_acc += train_batch_acc / acc_steps
            print(bold + "train epoch accuracy: " + end, train_epoch_acc, "\n")
            # calculate accuracy on the test set
            # (fixed: same accumulator-overwrite bug as above)
            for i in range(test_step):
                test_batch_acc = sess.run(accuracy_test)
                test_epoch_acc += test_batch_acc / test_step
            print(bold + "test epoch accuracy: " + end, test_epoch_acc, "\n")
            # after every x epochs decrease learning rate by factor of y
            # (fixed: this branch referenced an undefined name `learning_rate`)
            # NOTE(review): updating `lr` here cannot affect the already-built
            # graph, since parameter_update baked the initial lr in at graph
            # construction -- a learning-rate placeholder would be needed.
            if var_lr[0] != None:
                if e % var_lr[0] == 0:
                    lr = lr / var_lr[1]
            # save all the variables after every epoch
            save_path = saver.save(sess, checkpoint_save)
        coord.request_stop()
        coord.join(threads)
        print("---training over---")
```
### Evaluate
```
# number of evaluation steps needed to cover the whole test set
steps = ((test_data_count)) // batch_size
accu = 0
# rebuild the input pipeline and inference graph for evaluation
x_test, y_test = minibatch(batch_size, test_file_paths, image_size, string_length, class_count)
logit_test = inference(x_test, class_count)
accuracy_test = accuracy_calc(logit_test, y_test)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # restore the trained weights from the checkpoint
    saver.restore(sess, checkpoint_restore)
    for s in range(steps):
        acc = sess.run(accuracy_test)
        accu += acc / steps
        # NOTE(review): this prints the per-batch accuracy `acc`; the running
        # average `accu` is accumulated but never reported -- confirm intent
        print("test set accuracy: ", acc)
    coord.request_stop()
    coord.join(threads)

# characters the network can emit: letters, digits and space
all_chr = list(string.ascii_letters) + list(string.digits) + list(' ')
x_check, y_check = minibatch(batch_size, test_file_paths, image_size, string_length, class_count)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # pull one batch of images and visualize a few of them
    x_c = sess.run(x_check)
    eval_vizualization(x_c[1:5])
    coord.request_stop()
    coord.join(threads)
```
| github_jupyter |
# TV Script Generation
In this project, I have tried to generate my own [Seinfeld](https://en.wikipedia.org/wiki/Seinfeld) TV scripts using RNNs. I have used part of the [Seinfeld dataset](https://www.kaggle.com/thec03u5/seinfeld-chronicles#scripts.csv) of scripts from 9 seasons. The Neural Network will generate a new, "fake" TV script based on patterns it recognizes in this training data.
## Get the Data
```
# load in the raw Seinfeld scripts as one big string
import helper
data_dir = './data/Seinfeld_Scripts.txt'
text = helper.load_data(data_dir)
```
## Explore the Data
```
# range of script lines to display below
view_line_range = (0, 10)
import numpy as np

# print summary statistics about the corpus
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
lines = text.split('\n')
print('Number of lines: {}'.format(len(lines)))
word_count_line = [len(line.split()) for line in lines]
print('Average number of words in each line: {}'.format(np.average(word_count_line)))
print()
print('The lines {} to {}:'.format(*view_line_range))
print('\n'.join(text.split('\n')[view_line_range[0]:view_line_range[1]]))
```
---
## Implement Pre-processing Functions
The first thing to do to any dataset is pre-processing. I will implement the following pre-processing functions below:
- Lookup Table
- Tokenize Punctuation
### Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call `vocab_to_int`
- Dictionary to go from the id to word, we'll call `int_to_vocab`
```
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """Build word<->id lookup dictionaries for a corpus.

    Ids are assigned in order of decreasing word frequency, so the most
    common word receives id 0.

    :param text: iterable of word tokens
    :return: tuple (vocab_to_int, int_to_vocab)
    """
    frequency = Counter(text)
    ranked_words = sorted(frequency, key=frequency.get, reverse=True)
    vocab_to_int = {}
    int_to_vocab = {}
    for idx, word in enumerate(ranked_words):
        vocab_to_int[word] = idx
        int_to_vocab[idx] = word
    return (vocab_to_int, int_to_vocab)
tests.test_create_lookup_tables(create_lookup_tables)
```
### Tokenize Punctuation
```
def token_lookup():
    """Return a dict mapping each punctuation symbol to a word-like token.

    Replacing punctuation with these tokens before tokenizing keeps
    e.g. "bye!" and "bye" from becoming two separate vocabulary entries.
    """
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '-', '\n']
    tokens = ["||Period||", "||Comma||", "||Quotation_Mark||", "||Semicolon||",
              "||Exclamation_Mark||", "||Question_Mark||", "||Left_Parentheses||",
              "||Right_Parentheses||", "||Dash||", "||Return||"]
    return dict(zip(symbols, tokens))
tests.test_tokenize(token_lookup)
```
## Pre-process all the data and save it
Running the code cell below will pre-process all the data and save it to file.
```
# pre-process training data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
import helper
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
```
## Build the Neural Network
```
import torch
# Check for a GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('No GPU found. Please use a GPU to train your neural network.')
from torch.utils.data import TensorDataset, DataLoader
def batch_data(words, sequence_length, batch_size):
    """Batch word-id data into feature/target pairs wrapped in a DataLoader.

    Each feature is a window of `sequence_length` consecutive word ids; its
    target is the word id that immediately follows the window.

    :param words: sequence of word ids
    :param sequence_length: number of words per feature window
    :param batch_size: number of (feature, target) pairs per batch
    :return: shuffled torch DataLoader over the (feature, target) pairs
    """
    # one example per position where a full window plus its target fits
    sample_count = len(words) - sequence_length
    features = np.asarray(
        [words[start:start + sequence_length] for start in range(sample_count)],
        dtype=np.int64)
    targets = np.asarray(
        [words[start + sequence_length] for start in range(sample_count)],
        dtype=np.int64)
    dataset = TensorDataset(torch.from_numpy(features), torch.from_numpy(targets))
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)
# quick sanity check on a tiny dataset
words = np.array([99, 88, 77, 66, 55, 44, 33, 22, 11, 0])
loader = batch_data(words, 5, 3)
dataiter = iter(loader)
# fixed: DataLoader iterators have no .next() method on Python 3;
# use the builtin next() instead
next(dataiter)

# test dataloader on a longer sequence
test_text = range(50)
t_loader = batch_data(test_text, sequence_length=5, batch_size=10)
data_iter = iter(t_loader)
sample_x, sample_y = next(data_iter)
print(sample_x.shape)
print(sample_x)
print()
print(sample_y.shape)
print(sample_y)
print(len(t_loader))
```
---
## Build the Neural Network
```
import torch.nn as nn
class RNN(nn.Module):
    """Word-level LSTM language model: embedding -> LSTM -> dropout -> linear."""

    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
        """
        :param vocab_size: number of distinct tokens in the vocabulary
        :param output_size: size of the output score vector
        :param embedding_dim: dimensionality of the word embeddings
        :param hidden_dim: number of hidden units per LSTM layer
        :param n_layers: number of stacked LSTM layers
        :param dropout: dropout applied between stacked LSTM layers
        """
        super(RNN, self).__init__()
        # set class variables
        self.hidden_dim = hidden_dim
        self.output_size = output_size
        self.n_layers = n_layers
        # embedding layer
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # lstm layer
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)
        # dropout applied to the LSTM output before the classifier
        self.dropout = nn.Dropout(p=0.2)
        # fully connected layer mapping hidden state to vocabulary scores
        self.fc = nn.Linear(hidden_dim, output_size)

    def forward(self, nn_input, hidden):
        """Run one forward pass.

        :param nn_input: LongTensor of token ids, shape (batch, seq_len)
        :param hidden: (h, c) LSTM state tuple
        :return: (scores for the last time step, new hidden state)
        """
        batch_size = nn_input.size(0)
        embeds = self.embedding(nn_input)
        lstm_output, hidden = self.lstm(embeds, hidden)
        # flatten the time dimension so the linear layer sees (batch*seq, hidden)
        lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)
        output = self.dropout(lstm_output)
        # fixed: the classifier previously consumed lstm_output directly,
        # so the dropout result was computed but silently discarded
        output = self.fc(output)
        output = output.view(batch_size, -1, self.output_size)
        # keep only the scores for the last time step of each sequence
        output = output[:, -1]
        return output, hidden

    def init_hidden(self, batch_size):
        """Create a zeroed (h, c) state of shape (n_layers, batch, hidden_dim)."""
        # borrow a parameter tensor purely to inherit its dtype/device
        weight = next(self.parameters()).data
        # initialize hidden state with zero weights, and move to GPU if available
        if train_on_gpu:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
                      weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
        return hidden
```
### Define forward and backpropagation
```
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
    """Run one forward + backward pass and apply an optimizer step.

    :param rnn: model being trained
    :param optimizer: optimizer holding the model's parameters
    :param criterion: loss function
    :param inp: batch of input token ids
    :param target: batch of target token ids
    :param hidden: incoming (h, c) LSTM state
    :return: (batch loss as a float, hidden state produced by the model)
    """
    # move data to GPU when one is available
    if train_on_gpu:
        inp = inp.cuda()
        target = target.cuda()
    # detach the hidden state so gradients do not flow across batches
    detached_hidden = tuple(state.data for state in hidden)
    rnn.zero_grad()
    output, new_hidden = rnn(inp, detached_hidden)
    batch_loss = criterion(output, target)
    batch_loss.backward()
    # clip gradients to prevent the exploding-gradient problem
    nn.utils.clip_grad_norm_(rnn.parameters(), 5)
    optimizer.step()
    return batch_loss.item(), new_hidden
```
## Neural Network Training
With the structure of the network complete and data ready to be fed in the neural network, it's time to train it.
```
def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
    """Train `rnn` for `n_epochs` over the module-level train_loader.

    Prints the running-average loss every `show_every_n_batches` batches
    and returns the trained model.
    """
    batch_losses = []
    rnn.train()
    print("Training for %d epoch(s)..." % n_epochs)
    # iterate over completely full batches only (invariant across epochs)
    full_batches = len(train_loader.dataset) // batch_size
    for epoch_i in range(1, n_epochs + 1):
        # start each epoch from a fresh hidden state
        hidden = rnn.init_hidden(batch_size)
        for batch_i, (inputs, labels) in enumerate(train_loader, 1):
            if batch_i > full_batches:
                break
            # forward + backward pass; carry the returned hidden state forward
            loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
            batch_losses.append(loss)
            # periodically report the average loss since the last report
            if batch_i % show_every_n_batches == 0:
                print('Epoch: {:>4}/{:<4}  Loss: {}\n'.format(
                    epoch_i, n_epochs, np.average(batch_losses)))
                batch_losses = []
    return rnn
```
### Hyperparameters
```
# Data params
# Sequence Length - candidate values tried: [5, 7, 10, 15, 20, 25]
sequence_length = 15 # of words in a sequence
# Batch Size - candidate values tried: [64, 128, 256]
batch_size = 128
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)
# Training parameters
# Number of Epochs
num_epochs = 25
# Learning Rate - candidate values tried: [0.001 - 0.005, 0.01, 0.1]
learning_rate = 0.001
# Model parameters
# Vocab size (one id per unique token)
vocab_size = len(int_to_vocab)
# Output size (scores over the whole vocabulary)
output_size = vocab_size
# Embedding Dimension - candidate values listed: [200, 400, 600]
# NOTE(review): the chosen value 128 is outside the listed candidates -- confirm
embedding_dim = 128
# Hidden Dimension - candidate values tried: [300, 500, 1000]
hidden_dim = 500
# Number of RNN Layers - candidate values tried: [2,3]
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 500
```
### Train
```
import time
t0 = time.time()
# keep the hosted workspace session alive during the long training run
from workspace_utils import active_session
with active_session():
    # create model and move to gpu if available
    rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
    if train_on_gpu:
        rnn.cuda()
    # defining loss and optimization functions for training
    optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()
    # training the model
    trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)
    # saving the trained model
    helper.save_model('./save/trained_rnn', trained_rnn)
    print('Model Trained and Saved')
t1 = time.time()
print('Time taken:', (t1-t0)/3600, 'hours')
```
---
# Checkpoint
```
import torch
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./save/trained_rnn')
```
## Generate TV Script
With the network trained and saved, I can use it to generate a new, "fake" Seinfeld TV script.
### Generate Text
To generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores!
```
import torch.nn.functional as F
def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
    """Generate `predict_len` words of script text seeded with `prime_id`.

    :param rnn: trained RNN model
    :param prime_id: word id to seed generation with
    :param int_to_vocab: id -> word mapping
    :param token_dict: punctuation-token mapping from token_lookup()
    :param pad_value: word id used to left-pad the seed window
    :param predict_len: number of words to generate
    :return: generated text with punctuation tokens converted back to symbols

    NOTE(review): relies on the module-level `sequence_length` and
    `train_on_gpu` globals rather than taking them as parameters.
    """
    rnn.eval()
    # create a sequence (batch_size=1) with the prime_id in the last slot
    current_seq = np.full((1, sequence_length), pad_value)
    current_seq[-1][-1] = prime_id
    predicted = [int_to_vocab[prime_id]]
    for _ in range(predict_len):
        if train_on_gpu:
            current_seq = torch.LongTensor(current_seq).cuda()
        else:
            current_seq = torch.LongTensor(current_seq)
        # initialize the hidden state
        hidden = rnn.init_hidden(current_seq.size(0))
        # get the output of the rnn
        output, _ = rnn(current_seq, hidden)
        # get the next word probabilities
        p = F.softmax(output, dim=1).data
        if(train_on_gpu):
            p = p.cpu() # move to cpu
        # use top_k sampling to get the index of the next word
        top_k = 5
        p, top_i = p.topk(top_k)
        top_i = top_i.numpy().squeeze()
        # select the likely next word index with some element of randomness
        p = p.numpy().squeeze()
        word_i = np.random.choice(top_i, p=p/p.sum())
        # retrieve that word from the dictionary
        word = int_to_vocab[word_i]
        predicted.append(word)
        # shift the window left by one and append the new word id
        # NOTE(review): np.roll on a CUDA tensor would fail -- on the GPU path
        # this likely relies on current_seq having been copied to cpu; confirm
        current_seq = np.roll(current_seq, -1, 1)
        current_seq[-1][-1] = word_i
    gen_sentences = ' '.join(predicted)
    # Replace punctuation tokens with their original symbols
    for key, token in token_dict.items():
        # NOTE(review): `ending` is computed but never used -- confirm intent
        ending = ' ' if key in ['\n', '(', '"'] else ''
        gen_sentences = gen_sentences.replace(' ' + token.lower(), key)
    gen_sentences = gen_sentences.replace('\n ', '\n')
    gen_sentences = gen_sentences.replace('( ', '(')
    # return all the sentences
    return gen_sentences
```
### Generate a New Script
It's time to generate the text.
```
gen_length = 400
prime_word = 'jerry' # name for starting the script
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length)
print(generated_script)
```
#### Save your favorite scripts
Once you have a script that you like (or find interesting), save it to a text file!
```
# save the generated script to a text file
# (use a context manager so the file is closed even if the write fails)
with open("generated_script_1.txt", "w") as f:
    f.write(generated_script)
```
| github_jupyter |
```
import panel as pn
pn.extension('plotly')
```
The ``HoloViews`` pane renders HoloViews plots with one of the plotting backends supported by HoloViews. It supports the regular HoloViews widgets for exploring the key dimensions of a ``HoloMap`` or ``DynamicMap``, but is more flexible than the native HoloViews widgets since it also allows customizing widget types and their position relative to the plot.
#### Parameters:
For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).
* **``backend``** (str): Any of the supported HoloViews backends ('bokeh', 'matplotlib', or 'plotly')
* **``center``** (boolean, default=False): Whether to center the plot
* **``linked_axes``** (boolean, default=True): Whether to link axes across plots in a panel layout
* **``object``** (object): The HoloViews object being displayed
* **``widget_location``** (str): Where to lay out the widget relative to the plot
* **``widget_layout``** (ListPanel type): The object to lay the widgets out in, one of ``Row``, ``Column`` or ``WidgetBox``
* **``widget_type``** (str): Whether to generate individual widgets for each dimension, or to use a global linear scrubber with dimensions concatenated.
* **``widgets``** (dict): A mapping from dimension name to a widget class, instance, or dictionary of overrides to modify the default widgets.
##### Display
* **``default_layout``** (pn.layout.Panel, default=Row): Layout to wrap the plot and widgets in
___
The `panel` function will automatically convert any ``HoloViews`` object into a displayable panel, while keeping all of its interactive features:
```
import numpy as np
import holoviews as hv
box = hv.BoxWhisker((np.random.randint(0, 10, 100), np.random.randn(100)), 'Group').sort()
hv_layout = pn.panel(box)
hv_layout
```
By setting the pane's ``object`` the plot can be updated like all other pane objects:
```
hv_layout.object = hv.Violin(box).opts(violin_color='Group', cmap='Category20')
```
### Widgets
HoloViews natively renders plots with widgets if a HoloMap or DynamicMap declares any key dimensions. Unlike Panel's ``interact`` functionality, this approach efficiently updates just the data inside a plot instead of replacing it entirely. Calling ``pn.panel`` on the DynamicMap will return a ``Row`` layout (configurable via the ``default_layout`` option), which is equivalent to calling ``pn.pane.HoloViews(dmap).layout``:
```
import pandas as pd
import hvplot.pandas
import holoviews.plotting.bokeh

def sine(frequency=1.0, amplitude=1.0, function='sin'):
    """Return an hvplot line of amplitude * f(frequency * x) for x in [0, 20)."""
    xs = np.arange(200)/200*20.0
    # look the trig function up on the numpy module by name ('sin'/'cos'/'tan')
    ys = amplitude*getattr(np, function)(frequency*xs)
    return pd.DataFrame(dict(y=ys), index=xs).hvplot()

# DynamicMap with three key dimensions; Panel builds one widget per kdim
dmap = hv.DynamicMap(sine, kdims=['frequency', 'amplitude', 'function']).redim.range(
    frequency=(0.1, 10), amplitude=(1, 10)).redim.values(function=['sin', 'cos', 'tan'])
hv_panel = pn.panel(dmap)
hv_panel.pprint()
```
We can see the widgets generated for each of the dimensions and arrange them any way we like, e.g. by unpacking them into a ``Row``:
```
widgets = hv_panel[1]
pn.Column(
pn.Row(*widgets),
hv_panel[0])
```
However, more conveniently the HoloViews pane offers options to lay out the plot and widgets in a number of preconfigured arrangements using the ``center`` and ``widget_location`` parameters.
```
pn.panel(dmap, center=True, widget_location='right_bottom')
```
The ``widget_location`` parameter accepts all of the following options:
['left', 'bottom', 'right', 'top', 'top_left', 'top_right', 'bottom_left',
'bottom_right', 'left_top', 'left_bottom', 'right_top', 'right_bottom']
#### Customizing widgets
As we saw above, the HoloViews pane will automatically try to generate appropriate widgets for the type of data, usually defaulting to ``DiscreteSlider`` and ``Select`` widgets. This behavior can be modified by providing a dictionary of ``widgets`` by dimension name. The values of this dictionary can override the default widget in one of three ways:
* Supplying a ``Widget`` instance
* Supplying a compatible ``Widget`` type
* Supplying a dictionary of ``Widget`` parameter overrides
``Widget`` instances will be used as they are supplied and are expected to provide values compatible with the values defined on the HoloMap/DynamicMap. Similarly, if a ``Widget`` type is supplied it should be discrete if the parameter space defines a discrete set of values. If the defined parameter space is continuous, on the other hand, it may supply any valid value.
In the example below the 'amplitude' dimension is overridden with an explicit ``Widget`` instance, the 'function' dimension is overridden with a RadioButtonGroup letting us toggle between the different functions, and lastly the 'value' parameter on the 'frequency' widget is overridden to change the initial value:
```
hv_panel = pn.pane.HoloViews(dmap, widgets={
'amplitude': pn.widgets.LiteralInput(value=1., type=(float, int)),
'function': pn.widgets.RadioButtonGroup,
'frequency': {'value': 5}
}).layout
```
### Switching backends
The ``HoloViews`` pane will default to the Bokeh backend if no backend has been loaded, but you can override the backend as needed.
```
import holoviews.plotting.mpl
import holoviews.plotting.plotly
hv_pane = pn.pane.HoloViews(dmap, backend='matplotlib')
hv_pane
```
The ``backend``, like all other parameters, can be modified after the fact. To demonstrate, we can set up a select widget to toggle between backends for the above plot:
```
backend_select = pn.widgets.RadioButtonGroup(name='Backend Selector:', options=['bokeh', 'matplotlib', 'plotly'])
backend_select.link(hv_pane[0], value='backend')
backend_select
```
| github_jupyter |
```
import pandas as pd
# Loading data from the Github repository to colab notebook
filename = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter15/Dataset/crx.data'
# Loading the data using pandas; '?' marks missing values in this dataset
credData = pd.read_csv(filename,sep=",",header = None,na_values = "?")
credData.head()
# Changing the classes to 1 & 0 (column 15 holds the '+'/'-' approval label)
credData.loc[credData[15] == '+' , 15] = 1
credData.loc[credData[15] == '-' , 15] = 0
credData.head()
# Dropping all the rows with na values
newcred = credData.dropna(axis = 0)
newcred.shape
# Separating the categorical variables to make dummy variables
credCat = pd.get_dummies(newcred[[0,3,4,5,6,8,9,11,12]])
# Separating the numerical variables
credNum = newcred[[1,2,7,10,13,14]]
# Making the X variable which is a concatenation of categorical and numerical data
X = pd.concat([credCat,credNum],axis = 1)
print(X.shape)
# Separating the label as y variable
y = pd.Series(newcred[15], dtype="int")
print(y.shape)
# Normalising the data sets
# Import library function
from sklearn import preprocessing
# Creating the scaling function (scales each feature to the [0, 1] range)
minmaxScaler = preprocessing.MinMaxScaler()
# Transforming with the scaler function
X_tran = pd.DataFrame(minmaxScaler.fit_transform(X))
# Printing the output
X_tran.head()
# Splitting the data set to train and test sets
from sklearn.model_selection import train_test_split
# Splitting the data into train and test sets (70/30 split, fixed seed)
X_train, X_test, y_train, y_test = train_test_split(X_tran, y, test_size=0.3, random_state=123)
```
**MaxVoting**
```
# Defining the Voting classifier and three individual learners
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# Defining the three base models
model1 = LogisticRegression(random_state=123)
model2 = KNeighborsClassifier(n_neighbors=5)
model3 = RandomForestClassifier(n_estimators=500)
# Defining the ensemble model using VotingClassifier
# voting='hard' -> majority vote over the predicted class labels
model = VotingClassifier(estimators=[('lr', model1),('knn', model2),('rf',model3)], voting='hard')
# Fitting the model on the training set
model.fit(X_train,y_train)
# Predicting accuracy on the test set using .score() function
model.score(X_test,y_test)
# Generating the predictions on the test set
preds = model.predict(X_test)
# Printing the confusion matrix
from sklearn.metrics import confusion_matrix
# Confusion matrix for the test set (rows: true class, columns: predicted)
print(confusion_matrix(y_test, preds))
# Printing the classification report (precision/recall/F1 per class)
from sklearn.metrics import classification_report
print(classification_report(y_test, preds))
```
| github_jupyter |
# scatter_selector widget
A set of custom matplotlib widgets that allow you to select points on a scatter plot and use that selection as input to other interactive plots. There are three variants that differ only in what they pass to their callbacks:
1. {obj}`.scatter_selector`: callbacks will receive `index, (x, y)` where `index` is the position of the point in the list of points.
2. {obj}`.scatter_selector_value`: callbacks will receive `x, y`
3. {obj}`.scatter_selector_index`: callbacks will receive `index`
In this example we will use {obj}`.scatter_selector_index` along with the `indexer` convenience function to make line plots of stock data. However, you can use custom functions for the interactive plots, or even attach your own callbacks to the scatter_selector widgets.
## PCA of Stock Data
For this example we will plot the companies in the SP500 in a scatter plot by the principal components extracted via principal component analysis ([PCA](https://towardsdatascience.com/a-one-stop-shop-for-principal-component-analysis-5582fb7e0a9c)), giving an interactive visualization of the companies in the SP500. The data was originally obtained from <https://www.kaggle.com/camnugent/sandp500> and was cleaned using code derived from <https://github.com/Hekstra-Lab/scientific-python-bootcamp/tree/master/day3>
```
%matplotlib ipympl
import pickle
import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
import mpl_interactions.ipyplot as iplt
from mpl_interactions import indexer, panhandler, zoom_factory
from mpl_interactions.utils import indexer
from mpl_interactions.widgets import scatter_selector_index
```
### Data loading/cleaning
For this example we have pre-cleaned data that we will just load. If you are curious on how the data was originally processed you see the full code at the bottom of this notebook.
The datafiles that we load for this example are available for download at <https://github.com/ianhi/mpl-interactions/tree/master/docs/examples/data>
```
import pickle

# load the pre-cleaned metadata and the (companies x days) price matrix
with open("data/stock-metadata.pickle", "rb") as f:
    meta = pickle.load(f)
prices = np.load("data/stock-prices.npz")["prices"]
names = meta["names"]
good_idx = meta["good_idx"]  # only plot the ones for which we were able to parse sector info
data_colors = meta["data_colors"]

# calculate the daily price difference
price_changes = np.diff(prices)
# Below is a pretty standard way of normalizing numerical data
normalized_price_changes = price_changes - price_changes.mean(axis=-1, keepdims=True)
normalized_price_changes /= price_changes.std(axis=-1, keepdims=True)
# calculate the covariance matrix
covariance = np.cov(normalized_price_changes.T)
# Calculate the eigenvectors (i.e. the principle components)
# NOTE(review): for a symmetric covariance matrix np.linalg.eigh would be the
# usual choice and returns sorted eigenvalues; eig's ordering is unspecified,
# so "the first two components" below depends on that ordering -- confirm
evals, evecs = np.linalg.eig(covariance)
evecs = np.real(evecs)
# project the companies onto the principle components
transformed = normalized_price_changes @ evecs
# take only the first two components for plotting
# we also take only the subset of companies for which it was easy to extract a sector and a name
x, y = transformed[good_idx][:, 0], transformed[good_idx][:, 1]
```
### Making the plot
We create the left scatter plot using the `scatter_selector_index` which will tell use the index of the company that was clicked on. Since this is just a Matplotlib `AxesWidget` it can be passed directly to `iplt.plot` as a kwarg and the `controls` object will handle it appropriately.
In this example we also make use of the function `mpl_interactions.utils.indexer`. This is a convenience function that handles indexing an array for you. So these two statements are equivalent:
```python
# set up data
arr = np.random.randn(4,100).cumsum(-1)
def f(idx):
return arr[idx]
iplt.plot(f, idx=np.arange(4))
# or equivalently
iplt.plot(indexer(arr), idx=np.arange(4))
```
```
# left: PCA scatter acting as the selector; right: trace of the selected company
fig, axs = plt.subplots(1, 2, figsize=(10, 5), gridspec_kw={"width_ratios": [1.5, 1]})
index = scatter_selector_index(axs[0], x, y, c=data_colors, cmap="tab20")
# plot all the stock traces in light gray
plt.plot(prices.T, color="k", alpha=0.05)
# add interactive components to the subplot on the right
# note the use of indexer
controls = iplt.plot(indexer(prices), idx=index, color="r")
iplt.title(indexer(names), controls=controls["idx"])
# styling + zooming
axs[0].set_xlabel("PC-1")
axs[0].set_ylabel("PC-2")
axs[1].set_xlabel("days")
axs[1].set_ylabel("Price in $")
axs[1].set_yscale("log")
# keep references so the zoom/pan handlers are not garbage collected
cid = zoom_factory(axs[0])
ph = panhandler(fig)
```

### Datacleaning
Below is the code we used to clean and save the datasets. While we start out with 500 companies we end up with only 468 as some of them we were unable to easily and correctly parse so they were thrown away.
```
# NBVAL_SKIP
# Download the data from https://www.kaggle.com/camnugent/sandp500
# and save it into a folder named `data`
import glob

# use one file to learn how many trading days each price series should have
test = np.loadtxt("data/A_data.csv", delimiter=",", skiprows=1, usecols=1)
sp500_glob = glob.glob(
    "data/*.csv",
)
names = []
prices = np.zeros((len(sp500_glob), test.shape[0]))
prices_good = []
fails = []
for i, f in enumerate(sp500_glob):
    # ticker symbol is the filename prefix before the first underscore
    fname = f.split("/")[-1]
    names.append(fname.split("_")[0])
    try:
        prices[i] = np.loadtxt(f, delimiter=",", skiprows=1, usecols=1)
        prices_good.append(True)
    except:
        # NOTE(review): bare except silently drops any file whose series
        # length differs or that fails to parse; the ticker is kept in `fails`
        fails.append(fname.split("_")[0])
        prices_good.append(False)
        pass
# keep only the rows that parsed cleanly
prices = prices[prices_good]
np.savez_compressed("data/stock-prices.npz", prices=prices)

# processing names and sector info
arr = np.loadtxt("data/SP500_names.csv", delimiter="|", skiprows=1, dtype=str, encoding="utf-8")
name_dict = {a[0].strip(): a[[1, 2, 3]] for a in arr}
# idx_to_info = {i:name_dict[real_names[i]] for i in range(468)}
good_names = []
primary = []
secondary = []
# NOTE(review): `real_names` is never defined in this cell -- as shown this
# raises a NameError; it presumably should be the successfully-parsed subset
# of `names` (e.g. np.array(names)[prices_good]) -- confirm before running
good_idx = np.zeros(real_names.shape[0], dtype=bool)
for i, name in enumerate(real_names):
    try:
        info = name_dict[name]
        good_idx[i] = True
        good_names.append(info[0])
        primary.append(info[1])
        secondary.append(info[2])
    except:
        # ticker missing from the metadata table -- good_idx[i] stays False
        pass
# map each primary sector to an integer color index
psector_dict = {val: i for i, val in enumerate(np.unique(primary))}
data_colors = np.array([psector_dict[val] for val in primary], dtype=int)

import pickle

# bundle the cleaned metadata for the plotting cells above
meta = {
    "good_idx": good_idx,
    "names": good_names,
    "sector": psector_dict,
    "data_colors": data_colors,
}
with open("data/stock-metadata.pickle", "wb") as outfile:
    pickle.dump(meta, outfile)
```
| github_jupyter |
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes covering the whole globe (degrees).
lat_range = (-90, 90)
lng_range = (-180, 180)
```
# WeatherPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
## Generate Cities List
```
# Holders for the sampled coordinates and the resulting unique city names.
lat_lngs = []
cities = []
# Draw 1500 random (lat, lng) pairs uniformly over the configured ranges.
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Resolve every coordinate pair to its nearest city, de-duplicating as we go.
for coord_lat, coord_lng in lat_lngs:
    nearest = citipy.nearest_city(coord_lat, coord_lng).city_name
    if nearest not in cities:
        cities.append(nearest)
# Confirm we gathered a sufficiently large sample of cities.
len(cities)
cities
```
### Perform API Calls
* Perform a weather check on each city using a series of successive API calls.
* Include a print log of each city as it's being processed (with the city number and city name).
```
# Query current weather for every candidate city.
# Bug fix: the original appended to each list as it read the JSON, so a
# KeyError on a later field (e.g. missing "coord") left the earlier lists one
# element longer than the rest and desynchronized the eventual DataFrame
# columns.  All fields are now extracted *before* any list is appended, so a
# record is stored atomically or not at all.  (Typos in the progress strings
# "Beggining"/"Retrival" are also corrected.)
api_key = weather_api_key
url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + weather_api_key
city_name = []
lat = []
lng = []
max_temp = []
humidity = []
cloudiness = []
wind_speed = []
country = []  # reserved (not populated by this loop)
date = []     # reserved (not populated by this loop)
count = 1
print('Beginning Data Retrieval')
print('*************************************')
for city in cities:
    try:
        cities_data = requests.get(f'{url}&q={city}').json()
        # Extract everything first; a KeyError here aborts the whole record.
        name = cities_data["name"]
        c_lat = cities_data["coord"]["lat"]
        c_lng = cities_data["coord"]["lon"]
        c_temp = cities_data["main"]["temp"]
        c_hum = cities_data["main"]["humidity"]
        c_cloud = cities_data["clouds"]["all"]
        c_wind = cities_data["wind"]["speed"]
    except KeyError:
        print(f'City {count} not found, skipping...')
        count = count + 1
        continue
    # Safe to append now — either every column gets a value or none does.
    city_name.append(name)
    lat.append(c_lat)
    lng.append(c_lng)
    max_temp.append(c_temp)
    humidity.append(c_hum)
    cloudiness.append(c_cloud)
    wind_speed.append(c_wind)
    print(f'City {count} of {len(cities)} | {city}')
    count = count + 1
print('Data Retrieval Complete')
print('***************************************')
```
### Convert Raw Data to DataFrame
* Export the city data into a .csv.
* Display the DataFrame
```
# Assemble the per-city lists into a single table, persist it, and preview it.
cities_data_df = pd.DataFrame(
    {
        'City': city_name,
        'Lat': lat,
        'Lng': lng,
        'Max Temp': max_temp,
        'Humidity': humidity,
        'Cloudiness': cloudiness,
        'Wind Speed': wind_speed,
    }
)
cities_data_df.to_csv('cities_data.csv')
cities_data_df.head()
cities_data_df.describe()
```
## Inspect the data and remove the cities where the humidity > 100%.
----
Skip this step if there are no cities that have humidity > 100%.
```
# Keep only physically plausible humidity readings (<= 100%).
humidity_ok = cities_data_df["Humidity"] <= 100
cleaned_cities_df = cities_data_df.loc[humidity_ok]
cleaned_cities_df.head()
```
## Plotting the Data
* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
* Save the plotted figures as .pngs.
## Latitude vs. Temperature Plot
```
# Latitude vs. maximum temperature for every city that passed cleaning.
plt.scatter(
    cleaned_cities_df['Lat'],
    cleaned_cities_df['Max Temp'],
    edgecolors="black",
    facecolors="steelblue",
)
plt.title('City Latitude vs. Max Temperature (02/05/2021) ')
plt.xlabel('Latitude')
plt.ylabel('Max Temperature (F)')
plt.grid(True)
plt.savefig('lat_temp.png')
```
## Latitude vs. Humidity Plot
```
# Latitude vs. humidity scatter plot.
# Bug fix: this figure previously saved to 'lat_temp.png', silently
# overwriting the temperature plot — save it under its own filename.
plt.scatter(cleaned_cities_df['Lat'], cleaned_cities_df['Humidity'],
            edgecolors="black", facecolors="steelblue")
plt.title('City Latitude vs. Humidity (02/05/2021) ')
plt.xlabel('Latitude')
plt.ylabel('Humidity (%)')
plt.grid(True)
plt.savefig('lat_humidity.png')
```
## Latitude vs. Cloudiness Plot
```
# Latitude vs. cloudiness scatter plot.
# Bug fix: previously saved to 'lat_temp.png', overwriting the temperature
# plot — use a distinct filename.
plt.scatter(cleaned_cities_df['Lat'], cleaned_cities_df['Cloudiness'],
            edgecolors="black", facecolors="steelblue")
plt.title('City Latitude vs. Cloudiness (02/05/2021) ')
plt.xlabel('Latitude')
plt.ylabel('Cloudiness (%)')
plt.grid(True)
plt.savefig('lat_cloudiness.png')
```
## Latitude vs. Wind Speed Plot
```
# Latitude vs. wind speed scatter plot.
# Bug fixes: (1) previously saved to 'lat_temp.png', overwriting the
# temperature plot; (2) the y-axis unit was mislabeled '(%)' — the API is
# queried with units=Imperial, so wind speed is in mph.
plt.scatter(cleaned_cities_df['Lat'], cleaned_cities_df['Wind Speed'],
            edgecolors="black", facecolors="steelblue")
plt.title('City Latitude vs. Wind Speed (02/05/2021) ')
plt.xlabel('Latitude')
plt.ylabel('Wind Speed (mph)')
plt.grid(True)
plt.savefig('lat_wind_speed.png')
```
## Linear Regression
#### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
```
from scipy.stats import linregress
# Partition the cleaned cities into hemispheres by the sign of the latitude.
lat_as_float = pd.to_numeric(cleaned_cities_df["Lat"]).astype(float)
north_df = cleaned_cities_df.loc[lat_as_float > 0, :]
south_df = cleaned_cities_df.loc[lat_as_float < 0, :]
# Northern hemisphere: regress max temperature on latitude.
xs = pd.to_numeric(north_df['Lat']).astype(float)
ys = pd.to_numeric(north_df['Max Temp']).astype(float)
slope, intercept, rvalue, pvalue, stderr = linregress(xs, ys)
trend = intercept + slope * xs
plt.scatter(xs, ys, edgecolors="black", facecolors="steelblue")
plt.plot(xs, trend, "red")
plt.xlabel('Latitude')
plt.ylabel('Max Temp')
print(f"R-Value: {rvalue}")
plt.show()
```
#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
```
# Southern hemisphere: regress max temperature on latitude.
xs = pd.to_numeric(south_df['Lat']).astype(float)
ys = pd.to_numeric(south_df['Max Temp']).astype(float)
slope, intercept, rvalue, pvalue, stderr = linregress(xs, ys)
trend = intercept + slope * xs
plt.scatter(xs, ys, edgecolors="black", facecolors="steelblue")
plt.plot(xs, trend, "red")
plt.xlabel('Latitude')
plt.ylabel('Max Temp')
print(f"R-Value: {rvalue}")
plt.show()
```
#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
```
# Northern hemisphere: regress humidity on latitude.
xs = pd.to_numeric(north_df['Lat']).astype(float)
ys = pd.to_numeric(north_df['Humidity']).astype(float)
slope, intercept, rvalue, pvalue, stderr = linregress(xs, ys)
trend = intercept + slope * xs
plt.scatter(xs, ys, edgecolors="black", facecolors="steelblue")
plt.plot(xs, trend, "red")
plt.xlabel('Latitude')
plt.ylabel('Humidity')
print(f"R-Value: {rvalue}")
plt.show()
```
#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
```
# Southern hemisphere: regress humidity on latitude.
xs = pd.to_numeric(south_df['Lat']).astype(float)
ys = pd.to_numeric(south_df['Humidity']).astype(float)
slope, intercept, rvalue, pvalue, stderr = linregress(xs, ys)
trend = intercept + slope * xs
plt.scatter(xs, ys, edgecolors="black", facecolors="steelblue")
plt.plot(xs, trend, "red")
plt.xlabel('Latitude')
plt.ylabel('Humidity')
print(f"R-Value: {rvalue}")
plt.show()
```
#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
```
# Northern hemisphere: regress cloudiness on latitude.
xs = pd.to_numeric(north_df['Lat']).astype(float)
ys = pd.to_numeric(north_df['Cloudiness']).astype(float)
slope, intercept, rvalue, pvalue, stderr = linregress(xs, ys)
trend = intercept + slope * xs
plt.scatter(xs, ys, edgecolors="black", facecolors="steelblue")
plt.plot(xs, trend, "red")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
print(f"R-Value: {rvalue}")
plt.show()
```
#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
```
# Southern hemisphere: regress cloudiness on latitude.
xs = pd.to_numeric(south_df['Lat']).astype(float)
ys = pd.to_numeric(south_df['Cloudiness']).astype(float)
slope, intercept, rvalue, pvalue, stderr = linregress(xs, ys)
trend = intercept + slope * xs
plt.scatter(xs, ys, edgecolors="black", facecolors="steelblue")
plt.plot(xs, trend, "red")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
print(f"R-Value: {rvalue}")
plt.show()
```
#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
```
# Northern hemisphere: regress wind speed on latitude.
xs = pd.to_numeric(north_df['Lat']).astype(float)
ys = pd.to_numeric(north_df['Wind Speed']).astype(float)
slope, intercept, rvalue, pvalue, stderr = linregress(xs, ys)
trend = intercept + slope * xs
plt.scatter(xs, ys, edgecolors="black", facecolors="steelblue")
plt.plot(xs, trend, "red")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed')
print(f"R-Value: {rvalue}")
plt.show()
```
#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
```
# Southern hemisphere: regress wind speed on latitude.
xs = pd.to_numeric(south_df['Lat']).astype(float)
ys = pd.to_numeric(south_df['Wind Speed']).astype(float)
slope, intercept, rvalue, pvalue, stderr = linregress(xs, ys)
trend = intercept + slope * xs
plt.scatter(xs, ys, edgecolors="black", facecolors="steelblue")
plt.plot(xs, trend, "red")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed')
print(f"R-Value: {rvalue}")
plt.show()
```
| github_jupyter |
<h4>Unit 1 <h1 style="text-align:center"> Chapter 4</h1>
---
## Normalization
> Normalization is the task of putting words/tokens in a standard format. Normalization is beneficial despite the spelling information that is lost.
#### Case folding
---
> Mapping everything to the same case is called case folding.
Case folding is helpful for tasks like speech recognition, information retrieval.
For sentiment analysis and other text classification tasks, information
extraction, and machine translation, by contrast, case can be quite helpful and case
folding is generally not done.
Example,
'US' the country and 'us' the pronoun can outweigh the advantage in
generalization that case folding would have provided for other words.
---
#### Case folding using python
```
# Demonstrate the built-in str case-folding helpers on one mixed-case sample.
sentence = 'THIS string Has a MIX of lowercase AND UPPERCASE'
_foldings = [
    ("Lower case ->", str.lower),              # everything to lowercase
    ("Upper case ->", str.upper),              # everything to uppercase
    ("Capitalized case ->", str.capitalize),   # first letter of the string only
    ("Title case ->", str.title),              # first letter of every word
    ("Casefold -> ", str.casefold),            # aggressive lowercase for caseless matching
]
for label, fold in _foldings:
    print(label, fold(sentence))
```
#### Lemmatization
---
> Lemmatization is the task of determining that two words have the same root despite their surface differences.
Example,
Dinner & Dinners have the same <strong>lemma</strong> - Dinner
<strong>Why is lemmatization done?</strong>
There can be many reasons. One of the reasons is to reduce the vocabulary so that it does not have multiple words with exact same meaning.
<strong>How is lemmatization done?</strong>
Lemmatization method involves morphological parsing of the words.
---
#### Morphology
> Morphology is the study of how the words are built from smaller bearing units called <strong>morphemes</strong>.
There are two main classes of morphemes,
- <strong>Stem</strong> - The central morpheme of a word acting as the main.
- <strong>Affixes</strong> - The additional part that gives the word a variation.
For example,
Suppose the word is <strong>Dinners</strong>, here,
<strong>Stem</strong> = Dinner
<strong>Affix</strong> = -s
---
> Lemmatization algorithms can be complex in nature. Hence, sometimes stemming is used for naive morphological analysis.
---
#### Stemming
> Stemming is a naive version of morphological analysis that consists of removing word affixes to normalize the word.
The most commonly used stemming algorithm is <strong>The Porter Stemmer</strong> in which the text is run through a series of steps as a <strong>cascade</strong> in which the output of a pass is passed as input to the next.
Stemming is essentially a way of normalizing text through a series of rules, hence there are some errors of under and over generalization.
---
#### Lemmatization and Stemming using python
```
# Compare NLTK's dictionary-based lemmatizer with the rule-based Porter stemmer
# on the same sentence.
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.tokenize import word_tokenize
import nltk
# nltk.download('all')
sample = 'I am going to the market to get some groceries'
lemmatizer = WordNetLemmatizer()
# lemmatize() treats its input as a single token, so the whole sentence is
# looked up as one "word".
print(lemmatizer.lemmatize(sample))
stemmer = PorterStemmer()
print(stemmer.stem(sample))
```
---
### Sentence segmentation
> Sentence segmentation is the task of dividing text into sentences. This can be done by taking help from punctuations like ., , !,?
But this method can be confusing when the text has abbreviations like Mr., Miss., Inc. etc.
There are other better methods for segmenting text into sentences.
We will be discussing smarter methods in later notebooks. Let's look at a few naive approaches for sentence segmentation.
---
##### Sentence segmentation using Python
```
import re

# Naive segmentation: treat every '?', ',' and '.' as a sentence boundary.
SENTENCE_BOUNDARY = re.compile('[?,.]')
sentence_1 = 'How was your day? Were you able to get stuff done? I\'ll be taking a leave tommorow. \
Hope it\'s okay.'
segments_1 = SENTENCE_BOUNDARY.split(sentence_1)
print(len(segments_1))
print(segments_1)
sentence_2 = 'Hello Mr. Brown. How was your day today?'
segments_2 = SENTENCE_BOUNDARY.split(sentence_2)
print(len(segments_2))
print(segments_2)
# Notice how the period in the abbreviation "Mr." wrongly splits "Mr" / "Brown".
```
> Another heuristic approach for sentence segmentation is to use a dictionary having common abbreviations, and then perform dictionary matching.
| github_jupyter |
# Calculating Thermodynamics Observables with a quantum computer
```
# imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from functools import partial
from qiskit.utils import QuantumInstance
from qiskit import Aer
from qiskit.algorithms import NumPyMinimumEigensolver, VQE
from qiskit_nature.drivers import UnitsType, Molecule
from qiskit_nature.drivers.second_quantization import (
ElectronicStructureDriverType,
ElectronicStructureMoleculeDriver,
)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
from qiskit_nature.algorithms import GroundStateEigensolver
import qiskit_nature.constants as const
from qiskit_nature.algorithms.pes_samplers import BOPESSampler, EnergySurface1DSpline
from thermodynamics_utils.thermodynamics import constant_volume_heat_capacity
from thermodynamics_utils.vibrational_structure_fd import VibrationalStructure1DFD
from thermodynamics_utils.partition_function import DiatomicPartitionFunction
from thermodynamics_utils.thermodynamics import Thermodynamics
import warnings
warnings.simplefilter("ignore", np.RankWarning)
```
A preliminary draft with more information related to this tutorial can be found in preprint: Stober et al, arXiv 2003.02303 (2020)
### Calculation of the Born Oppenheimer Potential Energy Surface (BOPES)
To compute thermodynamic observables we begin with single point energy calculation which calculates the wavefunction and charge density and therefore the energy of a particular arrangement of nuclei. Here we compute the Born-Oppenheimer potential energy surface of a hydrogen molecule, as an example, which is simply the electronic energy as a function of bond length.
```
# Map fermionic operators to qubits via the Jordan-Wigner transformation.
qubit_converter = QubitConverter(mapper=JordanWignerMapper())
# Exact statevector simulation (no shot noise) as the VQE backend.
quantum_instance = QuantumInstance(backend=Aer.get_backend("aer_simulator_statevector"))
solver = VQE(quantum_instance=quantum_instance)
me_gss = GroundStateEigensolver(qubit_converter, solver)
# Single degree of freedom: the H-H bond length (distance between atoms 1 and 0).
stretch1 = partial(Molecule.absolute_distance, atom_pair=(1, 0))
mol = Molecule(
    geometry=[("H", [0.0, 0.0, 0.0]), ("H", [0.0, 0.0, 0.2])],
    degrees_of_freedom=[stretch1],
    masses=[1.6735328e-27, 1.6735328e-27],  # hydrogen atom mass, kg
)
# pass molecule to PSYCF driver
driver = ElectronicStructureMoleculeDriver(mol, driver_type=ElectronicStructureDriverType.PYSCF)
es_problem = ElectronicStructureProblem(driver)
# BOPES sampler testing
# Sample the ground-state energy at 50 bond lengths between 0.45 and 5 (Angstrom).
bs = BOPESSampler(gss=me_gss, bootstrap=True)
points = np.linspace(0.45, 5, 50)
res = bs.sample(es_problem, points)
energies = []
bs_res_full = res.raw_results
for point in points:
    # Total energy = electronic (computed) energy + nuclear repulsion at this geometry.
    energy = bs_res_full[point].computed_energies + bs_res_full[point].nuclear_repulsion_energy
    energies.append(energy)
fig = plt.figure()
plt.plot(points, energies)
plt.title("Dissociation profile")
plt.xlabel("Interatomic distance")
plt.ylabel("Energy")
# Fit a 1D spline through the sampled points to obtain a smooth energy surface.
energy_surface = EnergySurface1DSpline()
xdata = res.points
ydata = res.energies
energy_surface.fit(xdata=xdata, ydata=ydata)
plt.plot(xdata, ydata, "kx")
x = np.arange(min(xdata) - 0.25, max(xdata) + 0.25, 0.05)
plt.plot(x, energy_surface.eval(x), "r-")
plt.xlabel(r"distance, $\AA$")
plt.ylabel("energy, Hartree")
# Pad the y-axis by 10% of the sampled energy range on each side.
dist = max(ydata) - min(ydata)
plt.ylim(min(ydata) - 0.1 * dist, max(ydata) + 0.1 * dist)
```
### Calculation of the molecular Vibrational Energy levels
The Born-Oppenheimer approximation removes internuclear vibrations from the molecular Hamiltonian, and the energy computed from quantum mechanical ground-state energy calculations using this approximation contains only the electronic energy. Since internuclear vibrations still occur even at absolute zero, a correction is required to obtain the true zero-temperature energy of a molecule. This correction is called the zero-point vibrational energy (ZPE), which is computed by summing the contributions from the internuclear vibrational modes. Therefore, the next step in computing thermodynamic observables is determining the vibrational energy levels. This can be done by constructing the Hessian matrix based on computed single point energies close to the equilibrium bond length. The eigenvalues of the Hessian matrix can then be used to determine the vibrational energy levels and the zero-point vibrational energy
\begin{equation}
{\rm ZPE} = \frac{1}{2}\, \sum_i ^M \nu_i \, ,
\end{equation}
with $\nu_i$ being the vibrational frequencies, $M = 3N − 6$ or $M = 3N − 5$ for non-linear or linear molecules, respectively, and $N$ is number of the particles.
Here we fit a "full" energy surface using a 1D spline potential and use it to evaluate molecular vibrational energy levels.
```
# Overlay the first 15 vibrational energy levels on the fitted 1D potential.
# Cleanup: the original loop ended with a dead `on = np.ones(x.shape)`
# assignment (immediately overwritten next iteration and never read) — removed.
# The loop-invariant equilibrium energy is also hoisted out of the loop.
vibrational_structure = VibrationalStructure1DFD(mol, energy_surface)
plt.plot(xdata, ydata, "kx")
x = np.arange(min(xdata) - 0.25, max(xdata) + 0.25, 0.05)
plt.plot(x, energy_surface.eval(x), "r-")
plt.xlabel(r"distance, $\AA$")
plt.ylabel("energy, Hartree")
dist = max(ydata) - min(ydata)
plt.ylim(min(ydata) - 0.1 * dist, max(ydata) + 0.1 * dist)
# Energy of the potential minimum; each level sits at E_min + E_vib(N).
minimum_energy = energy_surface.eval(energy_surface.get_equilibrium_geometry())
for N in range(15):
    level = minimum_energy + vibrational_structure.vibrational_energy_level(N)
    plt.plot(x, np.ones(x.shape) * level, "g:")
plt.show()
```
### Create a partition function for the calculation of heat capacity
The partition function for a molecule is the product of contributions from translational, rotational, vibrational, electronic, and nuclear degrees of freedom. Having the vibrational frequencies, now we can obtain the vibrational partition function $q_{\rm vibration}$ to compute the whole molecular partition function
\begin{equation}
q_{\rm vibration} = \prod_{i=1} ^M \frac{\exp\,(-\Theta_{\nu_i}/2T)}{1-\exp\,(-\Theta_{\nu_i}/2T} \, .
\end{equation}
Here $\Theta_{\nu_i}= h\nu_i/k_B$, $T$ is the temperature and $k_B$ is the Boltzmann constant.
The single-point energy calculations and the resulting partition function can be used to calculate the (constant volume or constant pressure) heat capacity of the molecules. The constant volume heat capacity, for example, is given by
\begin{equation}
C_v = \left.\frac{\partial U}{\partial T}\right|_{N,V}\, ,
\qquad
{\rm with} \quad
U=k_B T^2 \left.\frac{\partial {\rm ln} Q}{\partial T}\right|_{N,V} .
\end{equation}
$U$ is the internal energy, $V$ is the volume and $Q$ is the partition function.
Here we illustrate the simplest usage of the partition function, namely creating a Thermodynamics object to compute properties like the constant pressure heat capacity defined above.
```
# Build the diatomic partition function and plot the constant-pressure heat
# capacity (cal mol^-1 K^-1) against temperature.
# Cleanup: the original defined an unused `P = 101350` and an initial
# `temps = np.arange(10, 1050, 5)` that was immediately shadowed — both dead
# assignments removed; the pressure constant is named instead of repeated.
Q = DiatomicPartitionFunction(mol, energy_surface, vibrational_structure)
PRESSURE_PA = 101350  # Pa
# Nuclear spins of the two hydrogen atoms (needed for ortho/para statistics).
mol.spins = [1 / 2, 1 / 2]
td = Thermodynamics(Q, pressure=PRESSURE_PA)
td.set_pressure(PRESSURE_PA)
temps = np.arange(10, 1500, 5)  # temperature grid, K
ymin = 5
ymax = 11
plt.plot(temps, td.constant_pressure_heat_capacity(temps) / const.CAL_TO_J)
plt.xlim(0, 1025)
plt.ylim(ymin, ymax)
plt.xlabel("Temperature, K")
plt.ylabel("Cp, cal mol$^{-1}$ K$^{-1}$")
plt.show()
```
Here we demonstrate how to access particular components (the rotational part) of the partition function, which in the H2 case we can further split to para-hydrogen and ortho-hydrogen components.
```
# Rotational partition function, split into equilibrium / para / ortho parts.
eq, para, ortho = (
    Q.get_partition(part="rot", split=s) for s in ("eq", "para", "ortho")
)
```
We will now plot the constant volume heat capacity (of the rotational part), demonstrating how we can call the functions in the 'thermodynamics' module directly by providing a callable object for the partition function (or, in this case, its rotational component). Note that in the plot we normalize by the universal gas constant R (Avogadro's number times Boltzmann's constant), and we use crosses to mark experimental data found in the literature for comparison.
```
# REFERENCE DATA from literature
# Digitized experimental measurements: temperatures (K) and rotational heat
# capacities (presumably already normalized by R — values are O(1); confirm
# against the original sources cited in the preprint).
df_brink_T = [80.913535, 135.240157, 176.633783, 219.808499, 246.226899]
df_brink_Cv = [0.118605, 0.469925, 0.711510, 0.833597, 0.895701]
df_eucken_T = [
    25.120525,
    30.162485,
    36.048121,
    41.920364,
    56.195875,
    62.484934,
    72.148692,
    73.805910,
    73.804236,
    92.214423,
    180.031917,
    230.300866,
]
df_eucken_Cv = [
    0.012287,
    0.012354,
    0.008448,
    0.020478,
    0.032620,
    0.048640,
    0.048768,
    0.076678,
    0.078670,
    0.170548,
    0.667731,
    0.847681,
]
df_gia_T = [
    190.919338,
    195.951254,
    202.652107,
    204.292585,
    209.322828,
    225.300754,
    234.514217,
    243.747768,
]
df_gia_Cv = [0.711700, 0.723719, 0.749704, 0.797535, 0.811546, 0.797814, 0.833793, 0.845868]
df_parting_T = [80.101665, 86.358919, 185.914204, 239.927797]
df_parting_Cv = [0.084730, 0.138598, 0.667809, 0.891634]
df_ce_T = [
    80.669344,
    135.550569,
    145.464190,
    165.301153,
    182.144856,
    203.372528,
    237.993108,
    268.696642,
    294.095771,
    308.872014,
]
df_ce_Cv = [
    0.103048,
    0.467344,
    0.541364,
    0.647315,
    0.714078,
    0.798258,
    0.891147,
    0.944848,
    0.966618,
    0.985486,
]
# Constant-volume heat capacity called directly with each partition-function
# component; normalize by the universal gas constant R = N_A * k_B.
HeatCapacity = constant_volume_heat_capacity
R = const.N_A * const.KB_J_PER_K
plt.plot(temps, HeatCapacity(eq, temps) / R, "-k", label="Cv_rot Equilibrium")
plt.plot(temps, HeatCapacity(para, temps) / R, "-b", label="Cv_rot Para")
plt.plot(temps, HeatCapacity(ortho, temps) / R, "-r", label="Cv_rot Ortho")
# Normal hydrogen is a 1:3 para:ortho mixture, hence the weighted combination.
plt.plot(
    temps,
    0.25 * HeatCapacity(para, temps) / R + 0.75 * HeatCapacity(ortho, temps) / R,
    "-g",
    label="Cv_rot 1:3 para:ortho",
)
# Experimental data points plotted as green crosses for comparison.
plt.plot(df_brink_T, df_brink_Cv, "+g")
plt.plot(df_eucken_T, df_eucken_Cv, "+g")
plt.plot(df_gia_T, df_gia_Cv, "+g")
plt.plot(df_parting_T, df_parting_Cv, "+g")
plt.plot(df_ce_T, df_ce_Cv, "+g", label="experimental data")
plt.legend(loc="upper right", frameon=False)
plt.xlim(10, 400)
plt.ylim(-0.1, 2.8)
plt.xlabel("Temperature, K")
plt.ylabel("Cv (rotational)/R")
plt.tight_layout()
plt.show()
# Report the Qiskit environment (Jupyter cell magics).
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
```
| github_jupyter |
# PICES Regional Ecosystem Tool
## Data acquisition, analysis, plotting & saving to facilitate IEA report
### Developed by Chelle Gentemann (cgentemann@gmail.com) & Marisol Garcia-Reyes (marisolgr@gmail.com)
***
***
# Instructions
## To configure:
### In the cell marked by <b>`* Configuration *`</b>, specify: Region, Variable, Date Period
## To execute:
### Click on the top menu click: <b>`Run`</b> -> <b>`Run All`</b>
***
***
# Regions
<table width='100%'><tr>
<td width='10%'>
<b>Region Number</b><br><br>
11<br><br>12<br><br>13<br><br>14<br><br>15<br><br>16<br><br>17
<br><br>18<br><br>19<br><br>20<br><br>21<br><br>22<br><br>23<br><br>24</td>
<td width='20%' text-align='left'>
<b> Region Name</b><br><br>
California Current<br><br>
Gulf of Alaska<br><br>
East Bering Sea<br><br>
North Bering Sea<br><br>
Aleutian Islands<br><br>
West Bering Sea<br><br>
Sea of Okhotsk<br><br>
Oyashio Current<br><br>
R19<br><br>
Yellow Sea<br><br>
East China Sea<br><br>
Kuroshio Current<br><br>
West North Pacific<br><br>
East North Pacific<br></td>
<td>
<img src='./utils/figures/PICES_all_regions_mask_ne.png' alt='North Pacific LME Regions' width='750'>
</td>
</tr>
</table>
***
# Variables
## 1) <b>SST</b>: Sea Surface Temperature ('1981-01-01' - present)
## 2) <b>Chl</b>: Chlorphyll-a Concentration ('1997-01-01' - '2018-06-30')
## 3) <b>Wind_U, Wind_V, Wind_Speed</b>: Wind Speed Vectors & Speed ('1997-01-01' - present)
## 4) <b>Current_U, Current_V, Current_Speed</b>: Sea Surface Currents Vectors ('1992-01-01' - present)
## 5) <b>SLA</b>: Sea Level Anomaly ('1992-01-01' - present)
## 6) <b>ADT</b>: Absolute Dynamical Topography Heights ('1992-01-01' - present)
***
***
###########################
# *** Configuration ***
<br>
###########################
```
#######################
#### Configuration ####
#######################
## Region to analyze ##
region = 11  # <<<----- Use number (11 to 24) based on table above
## Variable ##
## Select the variable to analyze from the list above (eg. 'sst','chl','wind_v','current_speed','sla','adt')
var = 'sst'  # <<<----- Use short name given above. upper or lower case accepted.
## Date Period to analyze ##
## Specify the period using the format: #### YYYY-MM-DD #####
## Data available specified above
## All data in monthly resolution
initial_date = '1981-01-01'
final_date = '2019-12-31'
##############################
#### End of configuration ####
##############################
#### Do not modify ####
%matplotlib inline
import sys
# Make the bundled analysis subroutines importable from this notebook.
sys.path.append('./utils/subroutines/')
import pices
from pices import analyze_PICES_Region
# Download, analyze, plot and save the requested variable for the chosen
# region and date range.
analyze_PICES_Region(region, var, initial_date, final_date)
#### End of script ####
```
| github_jupyter |
# August 2021 CVE Data
This notebook pulls all [JSON Data](https://nvd.nist.gov/vuln/data-feeds#JSON_FEED) from the NVD and performs some basic analysis of the CVE data.
## Getting Started
### Collecting Data
This cell pulls all JSON files from the NVD that we will be working with.
```
%%capture
# Fetch the 2020 and 2021 NVD CVE JSON feeds into ./jsondata, removing any
# stale copies first (output suppressed by %%capture).
!mkdir -p jsondata
%cd jsondata
!rm *.json
!rm *.zip
!wget https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-{2020..2021}.json.zip
!unzip -o "*.zip"
```
### Import Python Libraries
```
import calplot
import glob
import logging
import json
import matplotlib.pyplot as plt
import missingno as msno
import numpy as np
import os
import pandas as pd
import re
import uuid
import warnings
from datetime import datetime
# Quiet matplotlib's noisy font-discovery debug logging.
logging.getLogger('matplotlib.font_manager').disabled = True
warnings.filterwarnings("ignore")
```
# August 2021 CVE Data
### Build Base DataFrame
This code builds a Panda dataframe from the JSON files we downloaded, removing all CVE's marked rejected.
```
row_accumulator = []
for filename in glob.glob('nvdcve-1.1-2021.json'):
with open(filename, 'r', encoding='utf-8') as f:
nvd_data = json.load(f)
for entry in nvd_data['CVE_Items']:
cve = entry['cve']['CVE_data_meta']['ID']
try:
published_date = entry['publishedDate']
except KeyError:
published_date = 'Missing_Data_JG'
try:
attack_vector = entry['impact']['baseMetricV3']['cvssV3']['attackVector']
except KeyError:
attack_vector = 'Missing_Data_JG'
try:
attack_complexity = entry['impact']['baseMetricV3']['cvssV3']['attackComplexity']
except KeyError:
attack_complexity = 'Missing_Data_JG'
try:
privileges_required = entry['impact']['baseMetricV3']['cvssV3']['privilegesRequired']
except KeyError:
privileges_required = 'Missing_Data_JG'
try:
user_interaction = entry['impact']['baseMetricV3']['cvssV3']['userInteraction']
except KeyError:
user_interaction = 'Missing_Data_JG'
try:
scope = entry['impact']['baseMetricV3']['cvssV3']['scope']
except KeyError:
scope = 'Missing_Data_JG'
try:
confidentiality_impact = entry['impact']['baseMetricV3']['cvssV3']['confidentialityImpact']
except KeyError:
confidentiality_impact = 'Missing_Data_JG'
try:
integrity_impact = entry['impact']['baseMetricV3']['cvssV3']['integrityImpact']
except KeyError:
integrity_impact = 'Missing_Data_JG'
try:
availability_impact = entry['impact']['baseMetricV3']['cvssV3']['availabilityImpact']
except KeyError:
availability_impact = 'Missing_Data_JG'
try:
base_score = entry['impact']['baseMetricV3']['cvssV3']['baseScore']
except KeyError:
base_score = '0.0'
try:
base_severity = entry['impact']['baseMetricV3']['cvssV3']['baseSeverity']
except KeyError:
base_severity = 'Missing_Data_JG'
try:
exploitability_score = entry['impact']['baseMetricV3']['exploitabilityScore']
except KeyError:
exploitability_score = 'Missing_Data_JG'
try:
impact_score = entry['impact']['baseMetricV3']['impactScore']
except KeyError:
impact_score = 'Missing_Data_JG'
try:
cwe = entry['cve']['problemtype']['problemtype_data'][0]['description'][0]['value']
except IndexError:
cwe = 'Missing_Data_JG'
try:
description = entry['cve']['description']['description_data'][0]['value']
except IndexError:
description = ''
new_row = {
'CVE': cve,
'Published': published_date,
'AttackVector': attack_vector,
'AttackComplexity': attack_complexity,
'PrivilegesRequired': privileges_required,
'UserInteraction': user_interaction,
'Scope': scope,
'ConfidentialityImpact': confidentiality_impact,
'IntegrityImpact': integrity_impact,
'AvailabilityImpact': availability_impact,
'BaseScore': base_score,
'BaseSeverity': base_severity,
'ExploitabilityScore': exploitability_score,
'ImpactScore': impact_score,
'CWE': cwe,
'Description': description
}
if not description.startswith('** REJECT **'): # disputed, rejected and other non issues start with '**'
row_accumulator.append(new_row)
nvd_2021 = pd.DataFrame(row_accumulator)
nvd_2021 = nvd_2021[(nvd_2021['Published'] > '2021-08-01') & (nvd_2021['Published'] < '2021-09-01')]
nvd_2021['Published'] = pd.to_datetime(nvd_2021['Published']).apply(lambda x: x.date())
print ('CVEs from NVD:', nvd_2021['CVE'].count())
nvdcount = nvd_2021['Published'].count()
per_day = nvdcount/31
per_day = round(per_day, 0)
print('CVEs Published Per Day:', per_day)
```
### CVEs Per Day Graph
```
# Daily publication counts for August 2021, as a time-series line plot.
nvd_data_2021 = nvd_2021['Published'].value_counts()
cg = nvd_data_2021.plot(
    colormap='jet',
    marker='.',
    figsize=(16, 8),
    markersize=2,
    title='CVEs Per Day',
)
cg.set_xlabel("Date")
cg.set_ylabel("New CVEs")
plt.grid()
plt.savefig('August2021.jpg', dpi=300, bbox_inches='tight')
```
### Most CVEs Per Day
```
nvd_2021['Published'].value_counts().head(10)
```
# CVSS 3 Breakdown
```
# Histogram of CVSS v3 base scores.  A score of 0 means "no CVSS v3 data"
# (see the parsing cell's default), so mask it out rather than skew the bins.
nvd_2021['BaseScore'] = pd.to_numeric(nvd_2021['BaseScore'])
# np.NaN was removed in NumPy 2.0 — use the canonical lowercase np.nan.
nvd_2021['BaseScore'] = nvd_2021['BaseScore'].replace(0, np.nan)
nvd_2021['BaseScore'].plot(kind="hist", title='CVSS Breakdown')
plt.savefig('August2021CVSS.jpg', dpi=300, bbox_inches='tight')
```
Average CVSS Score:
```
nvd_2021['BaseScore'].mean()
```
# August 2020 CVE Data
### Build Base DataFrame
This code builds a Panda dataframe from the JSON files we downloaded, removing all CVE's marked rejected.
```
row_accumulator = []
for filename in glob.glob('nvdcve-1.1-2020.json'):
with open(filename, 'r', encoding='utf-8') as f:
nvd_data = json.load(f)
for entry in nvd_data['CVE_Items']:
cve = entry['cve']['CVE_data_meta']['ID']
try:
published_date = entry['publishedDate']
except KeyError:
published_date = 'Missing_Data_JG'
try:
attack_vector = entry['impact']['baseMetricV3']['cvssV3']['attackVector']
except KeyError:
attack_vector = 'Missing_Data_JG'
try:
attack_complexity = entry['impact']['baseMetricV3']['cvssV3']['attackComplexity']
except KeyError:
attack_complexity = 'Missing_Data_JG'
try:
privileges_required = entry['impact']['baseMetricV3']['cvssV3']['privilegesRequired']
except KeyError:
privileges_required = 'Missing_Data_JG'
try:
user_interaction = entry['impact']['baseMetricV3']['cvssV3']['userInteraction']
except KeyError:
user_interaction = 'Missing_Data_JG'
try:
scope = entry['impact']['baseMetricV3']['cvssV3']['scope']
except KeyError:
scope = 'Missing_Data_JG'
try:
confidentiality_impact = entry['impact']['baseMetricV3']['cvssV3']['confidentialityImpact']
except KeyError:
confidentiality_impact = 'Missing_Data_JG'
try:
integrity_impact = entry['impact']['baseMetricV3']['cvssV3']['integrityImpact']
except KeyError:
integrity_impact = 'Missing_Data_JG'
try:
availability_impact = entry['impact']['baseMetricV3']['cvssV3']['availabilityImpact']
except KeyError:
availability_impact = 'Missing_Data_JG'
try:
base_score = entry['impact']['baseMetricV3']['cvssV3']['baseScore']
except KeyError:
base_score = '0.0'
try:
base_severity = entry['impact']['baseMetricV3']['cvssV3']['baseSeverity']
except KeyError:
base_severity = 'Missing_Data_JG'
try:
exploitability_score = entry['impact']['baseMetricV3']['exploitabilityScore']
except KeyError:
exploitability_score = 'Missing_Data_JG'
try:
impact_score = entry['impact']['baseMetricV3']['impactScore']
except KeyError:
impact_score = 'Missing_Data_JG'
try:
cwe = entry['cve']['problemtype']['problemtype_data'][0]['description'][0]['value']
except IndexError:
cwe = 'Missing_Data_JG'
try:
description = entry['cve']['description']['description_data'][0]['value']
except IndexError:
description = ''
new_row = {
'CVE': cve,
'Published': published_date,
'AttackVector': attack_vector,
'AttackComplexity': attack_complexity,
'PrivilegesRequired': privileges_required,
'UserInteraction': user_interaction,
'Scope': scope,
'ConfidentialityImpact': confidentiality_impact,
'IntegrityImpact': integrity_impact,
'AvailabilityImpact': availability_impact,
'BaseScore': base_score,
'BaseSeverity': base_severity,
'ExploitabilityScore': exploitability_score,
'ImpactScore': impact_score,
'CWE': cwe,
'Description': description
}
if not description.startswith('** REJECT **'): # disputed, rejected and other non issues start with '**'
row_accumulator.append(new_row)
nvd_2020 = pd.DataFrame(row_accumulator)
nvd_2020 = nvd_2020[(nvd_2020['Published'] > '2020-08-01') & (nvd_2020['Published'] < '2020-09-01')]
nvd_2020['Published'] = pd.to_datetime(nvd_2020['Published']).apply(lambda x: x.date())
print ('CVEs from NVD:', nvd_2020['CVE'].count())
nvdcount = nvd_2020['Published'].count()
per_day = nvdcount/31
per_day = round(per_day, 0)
print('CVEs Published Per Day:', per_day)
```
### CVEs Per Day Graph
```
# Daily publication counts for August 2020, as a time-series line plot.
nvd_data_2020 = nvd_2020['Published'].value_counts()
cg = nvd_data_2020.plot(
    colormap='jet',
    marker='.',
    figsize=(16, 8),
    markersize=2,
    title='CVEs Per Day',
)
cg.set_xlabel("Date")
cg.set_ylabel("New CVEs")
plt.grid()
plt.savefig('August2020.jpg', dpi=300, bbox_inches='tight')
```
### Most CVEs Per Day
```
nvd_2020['Published'].value_counts().head(10)
```
### CVE Heat Map
# CVSS 3 Breakdown
```
# Histogram of CVSS v3 base scores.  A score of 0 means "no CVSS v3 data"
# (see the parsing cell's default), so mask it out rather than skew the bins.
nvd_2020['BaseScore'] = pd.to_numeric(nvd_2020['BaseScore'])
# np.NaN was removed in NumPy 2.0 — use the canonical lowercase np.nan.
nvd_2020['BaseScore'] = nvd_2020['BaseScore'].replace(0, np.nan)
nvd_2020['BaseScore'].plot(kind="hist", title='CVSS Breakdown')
plt.savefig('August2020CVSS.jpg', dpi=300, bbox_inches='tight')
```
Average CVSS Score:
```
nvd_2020['BaseScore'].mean()
```
| github_jupyter |
## Explore The Data: Explore Continuous Features
Using the Titanic dataset from [this](https://www.kaggle.com/c/titanic/overview) Kaggle competition.
This dataset contains information about 891 people who were on board the ship when departed on April 15th, 1912. As noted in the description on Kaggle's website, some people aboard the ship were more likely to survive the wreck than others. There were not enough lifeboats for everybody so women, children, and the upper-class were prioritized. Using the information about these 891 passengers, the challenge is to build a model to predict which people would survive based on the following fields:
- **Name** (str) - Name of the passenger
- **Pclass** (int) - Ticket class (1st, 2nd, or 3rd)
- **Sex** (str) - Gender of the passenger
- **Age** (float) - Age in years
- **SibSp** (int) - Number of siblings and spouses aboard
- **Parch** (int) - Number of parents and children aboard
- **Ticket** (str) - Ticket number
- **Fare** (float) - Passenger fare
- **Cabin** (str) - Cabin number
- **Embarked** (str) - Port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)
**This section focuses on exploring the `Pclass`, `Age`, `SibSp`, `Parch`, and `Fare` features.**
### Read In Data
```
# Read in our data
import pandas as pd
from scipy import stats

titanic_df = pd.read_csv('../Data/titanic.csv')
titanic_df.head()
# Identifier and categorical columns are not needed for this continuous-feature
# exploration, so drop them up front.
cat_feat = ['PassengerId', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']
titanic_df.drop(columns=cat_feat, inplace=True)
titanic_df.head()
```
### Explore Continuous Features
```
# Look at the general distribution of these features
titanic_df.describe()
# Look at the correlation matrix (all remaining columns are numeric)
titanic_df.corr()
```
- As seen in the data above, the correlations between `Survived` and both `Pclass` and `Fare` are fairly strong. Note that negative correlation still counts, so we might need to dig further.
- there is also strong negative correlation between `Pclass` and `Fare`, which makes sense, such as 3rd class is much cheaper fare than 1st class.
```
# Look at fare by different passenger class levels
titanic_df.groupby('Pclass')['Fare'].describe()
def describe_cont_features(feature):
    """Print per-survival-group summary statistics and a t-test for `feature`.

    Relies on the module-level `titanic_df` DataFrame and the `ttest` helper.
    """
    print('\n*** Results for {} ***'.format(feature))
    print(titanic_df.groupby('Survived')[feature].describe())
    # ttest() prints its own result and returns None; calling it directly
    # avoids printing a stray "None" line after each test.
    ttest(feature)
def ttest(feature):
    """Welch's t-test comparing `feature` between survivors and non-survivors.

    Prints the t-statistic and p-value; reads the module-level `titanic_df`.
    """
    # Separate the survival and non-survival groups
    survived = titanic_df[titanic_df['Survived'] == 1][feature]
    not_survived = titanic_df[titanic_df['Survived'] == 0][feature]
    # equal_var=False selects Welch's t-test (no equal-variance assumption).
    # nan_policy='omit' ignores missing values; without it the Age feature
    # (which contains NaNs) would report a t-statistic of nan.
    tstat, pval = stats.ttest_ind(survived, not_survived, equal_var=False, nan_policy='omit')
    print('t-statistics: {:.1f}, p-value: {:.3}'.format(tstat, pval))
# Look at the distribution of each feature at each level of the target variable
for feature in ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']:
    describe_cont_features(feature)
```
Based on the statistics above, we can see that the mean and median values of `Pclass` and `Fare` stand out: there is a significant difference between the Survived and Non-Survived groups for those features. But we have to keep in mind that there is a negative correlation between those two features.
```
# Look at the average value of each feature based on whether Age is missing
# (True row = passengers with missing Age, False row = Age recorded)
titanic_df.groupby(titanic_df['Age'].isnull()).mean()
```
Looking at the table above, `True` is the group with missing `Age` and `False` is the group with recorded ages.
We can see that the mean survival rate for the group with missing ages is much lower than for the group with recorded ages. Maybe ages weren't recorded for passengers travelling in the bowels of the ship. We can also see that this group has a somewhat lower `Pclass`, more siblings and spouses aboard, and a much lower fare.
| github_jupyter |
# Get vaccine coverage by ZIP Codes data from CDPH
```
# Notebook setup: black auto-formatting plus wide pandas display limits
%load_ext lab_black
import pandas as pd
import datetime as dt
import json
import os
import glob
import urllib.request
pd.options.display.max_columns = 50
pd.options.display.max_rows = 1000
pd.set_option("display.max_colwidth", None)
# Today's date string, used to name downloaded snapshots
today = dt.datetime.today().strftime("%Y-%m-%d")
```
### Get the metadata from the API
```
# CKAN package metadata endpoint for the vaccine-by-ZIP dataset
endpoint = "https://data.chhs.ca.gov/api/3/action/package_show?id=covid-19-vaccine-progress-dashboard-data-by-zip-code"
# Use a context manager so the HTTP connection is closed even if parsing fails
with urllib.request.urlopen(endpoint) as jsonurl:
    text = json.loads(jsonurl.read())
```
### Get the object with the max date
```
# List of resource objects (one per published file) from the CKAN response
data = text["result"]["resources"]
```
Loop through the dict to get the file names that contain dates
```
# dates = []
# for obj in data:
# slug = obj["name"]
# # find valid dates in the name field and append them to a list
# try:
# date = pd.to_datetime(
# slug.replace("COVID-19 Vaccines by ZIP Code ", "")
# ).strftime("%Y-%m-%d")
# dates.append(date)
# except:
# pass
```
Pick out the latest date from that list
```
# latest_date_str = max(d for d in dates)
```
Loop through the objects again and match the latest one with the latest date
```
# for obj in data:
# slug = obj["name"]
# try:
# date = pd.to_datetime(
# slug.replace("COVID-19 Vaccines by ZIP Code ", "")
# ).strftime("%Y-%m-%d")
# if date == latest_date_str:
# latest_obj = obj
# else:
# pass
# except:
# pass
# latest_date = pd.to_datetime(latest_obj["created"]).strftime("%Y-%m-%d")
```
### Read that latest file into a dataframe to limit redacted rows ~~and save it in raw~~
```
# Direct download URL for the weekly vaccines-by-ZIP CSV
url = "https://data.chhs.ca.gov/dataset/ead44d40-fd63-4f9f-950a-3b0111074de8/resource/ec32eece-7474-4488-87f0-6e91cb577458/download/covid19vaccinesbyzipcode_test.csv"
df = pd.read_csv(url)
# Drop rows without a Vaccine Equity Metric assignment (per the heading above,
# this limits redacted rows)
df = df[df["vem_source"] != "No VEM Assigned"]
df = df.rename(columns={"as_of_date": "date"})
# df.to_csv("data/raw/" + latest_date + ".csv", index=False)
```
---
## Concatenate all the weekly updates
### Get all files and assign a date to each table based on the file name
```
# path = ""
# files = glob.glob(os.path.join(path, "data/raw/*.csv"))
# file_df = (
# pd.read_csv(f, low_memory=False).assign(date=os.path.basename(f)) for f in files
# )
```
### Concatenate them into one timeseries and clean update date field
```
# concat_df = pd.concat(
# file_df,
# ignore_index=True,
# )
# concat_df["date"] = pd.to_datetime(
# concat_df["date"].str.replace(".csv", "", regex=False)
# )
# concat_df[concat_df["Zip Code Tabulation Area"] == 91320]
```
---
## Export
### All updates
```
# Export the full timeseries
df.to_csv("data/timeseries.csv", index=False)
```
### Latest update
```
# Export only the most recent update
df[df["date"] == df["date"].max()].to_csv("data/latest.csv", index=False)
```
| github_jupyter |
## This notebook contains prototyping work for implementing the viterbi decode algorithm
```
import numpy as np
import librosa
import matplotlib.pyplot as plt
def redistribute_trans_table(A):
    """Return a column-normalized copy of transition table `A` with
    redistributed probability mass.

    For columns 5 onward, the largest entry is clipped down to the second
    largest before renormalizing (flattening sharp peaks). For columns 0-4,
    the entries in rows 5+ are replaced with values drawn uniformly from
    [0, column max) and the column is renormalized.

    NOTE: uses np.random, so results are reproducible only under a fixed seed.
    """
    # Work on a copy so the caller's array is not mutated as a side effect
    # (the original version modified `A` in place while also returning it).
    A = A.copy()
    for i in range(5, A.shape[1]):
        current_col = A[:, i]
        idx = (-current_col).argsort()[:2]
        # Clip the peak down to the runner-up value, then renormalize
        second_max_val = current_col[idx[1]]
        current_col[idx[0]] = second_max_val
        A[:, i] = current_col / current_col.sum(axis=0)
    for i in range(0, 5):
        current_col = A[:, i]
        idx = (-current_col).argsort()[:1]
        max_val = current_col[idx[0]]
        # Replace rows 5+ with uniform noise bounded by the column maximum
        A[5:, i] = np.random.uniform(low=0, high=max_val, size=A.shape[0] - 5)
        A[:, i] = A[:, i] / A[:, i].sum(axis=0)
    return A
def viterbi(A, B, sequence, B_weight=1):
    """Viterbi decoding of the most likely note sequence for `sequence`.

    Parameters
    ----------
    A : numpy array
        Transition table, shape [num_notes + 1, num_notes]; the extra last
        row holds transition probabilities from the <start> token.
    B : numpy array
        Emission table, shape [num_notes, num_labels].
    sequence : list
        Observed label indices (columns of B).
    B_weight : float
        Multiplicative weight applied to B before taking logs.
        NOTE(review): weighting inside the log scales all emissions by a
        constant log(B_weight), which does not change the argmax — confirm
        whether B_weight * log(B) was intended instead.

    Returns
    -------
    (note_sequence, viterbi_table, backpointers)
    """
    # let's work in log space
    A = np.log(A)
    B = np.log(B*B_weight)
    num_notes = B.shape[0]
    # create empty viterbi matrix and backpointer matrix (object arrays)
    viterbi = np.full((num_notes, len(sequence)), None)
    bp = np.full((num_notes, len(sequence)), None)
    # Compute the first column: transition from <start> (last row of A)
    # times the emission of the first observed label
    first_label = sequence[0]
    start_token = A.shape[0]
    for n in range(num_notes):
        viterbi[n,0] = A[-1,n] + B[n,first_label]
        bp[n,0] = start_token
    for w in range(1, len(sequence)):
        for n in range(num_notes):
            # BUG FIX: emissions must be indexed by the observed label
            # sequence[w], not by the position w (the first column above
            # already used the label correctly).
            viterbi[n,w], bp[n,w] = compute_viterbi_val(n=n, w=w, viterbi=viterbi, A_prev=A[:,n], B_prev=B[n,sequence[w]]) #transitions from previous note to current note
    # Find maximum value of last column of viterbi
    max_idx = np.argmax(viterbi[:,-1])
    # Trace back maximum indices in backpointer table
    note_sequence = [max_idx]
    next_note = bp[max_idx,-1]
    for i in range(1,len(sequence)):
        reverse_i = len(sequence)-i
        # print('reverse_i : {}'.format(reverse_i))
        note_sequence.append(bp[next_note, reverse_i])
        next_note = bp[next_note, reverse_i]
        # print('next_note: {}'.format(next_note))
    # for i in range(0,len(sequence)):
    #     reverse_i = len(sequence)-i-1
    #     print(f'[{i-1},{reverse_i}]')
    #     note_sequence.append(bp[note_sequence[i-1], reverse_i])
    note_sequence.reverse()
    return note_sequence, viterbi, bp
def compute_viterbi_val(n, w, viterbi, A_prev, B_prev):
    """Return (best value, backpointer) for cell (n, w) of the Viterbi table.

    Maximizes viterbi[i, w-1] + A_prev[i] + B_prev over previous notes i.
    `n` is kept in the signature for call-site symmetry but is not used.
    """
    prev_col = viterbi[:, w - 1]
    # Seed with i = 0 — the original computed this candidate twice (once
    # before the loop and again on the first iteration); do it once.
    best_val = prev_col[0] + A_prev[0] + B_prev
    bp = 0
    for i in range(1, len(prev_col)):
        candidate = prev_col[i] + A_prev[i] + B_prev
        # Strict '>' keeps the earliest index on ties, matching the original
        if candidate > best_val:
            best_val = candidate
            bp = i
    return best_val, bp
def make_emission_table():
    """Build a random emission-probability table.

    Returns a 32 x 12000 array of non-negative values whose columns each sum
    to 1, i.e. a categorical distribution over the 32 notes for every one of
    the 12000 possible observation labels.
    """
    raw = np.random.uniform(0, 1, (32, 12000))
    # Normalize each column so it forms a valid probability distribution
    return raw / raw.sum(axis=0)
def onset_label(onset, spectrogram=None):
    """Assign an emission-table column index to each detected onset.

    Placeholder implementation: a real version would pick, for each onset,
    the spectrogram frame that best matches the emission probability table.

    INPUTS: 1D onset array, computed spectrogram (currently unused)
    OUTPUTS: 1D array, same length as `onset`, of column indices into the
    emission probability table
    """
    # Number of possible labels; will eventually be derived from spectrogram
    # clusters — fixed locally for now just to highlight the interface.
    num_labels = 12000
    return np.random.randint(0, num_labels, len(onset))
def onset_time(processed_path):
    """Load an audio file and detect note-onset times in its harmonic part.

    Returns (y_harmonic, onset_times) where onset_times is in seconds.
    """
    # Load the song
    y, sr = librosa.load(processed_path)
    # resample the song if it isn't sr=22050 (for consistent sizing)
    if not sr == 22050:
        y = librosa.resample(y, sr, 22050)
        sr = 22050
    # source seperation, margin can be tuned
    y_harmonic, y_percussive = librosa.effects.hpss(y, margin=2.0)
    # Set Hop_len
    hop_len = 520
    onset_frame_backtrack = librosa.onset.onset_detect(y_harmonic, sr = sr, hop_length = hop_len, backtrack=True)
    # BUG FIX: frames_to_time must use the same sr/hop_length that
    # onset_detect used; the previous call relied on the default hop (512),
    # silently skewing every onset time.
    onset_times = librosa.frames_to_time(onset_frame_backtrack, sr=sr, hop_length=hop_len)
    return y_harmonic, onset_times
# Load the learned transition table and build its redistributed variant.
# Pass a copy so the original A is preserved even if redistribute_trans_table
# modifies its argument in place.
A = np.load('trans_prob_table.npy')
A_redistributed = redistribute_trans_table(A.copy())
# Display the original and redistributed tables side by side
plt.figure()
plt.imshow(A)
plt.title('Transition Table')
plt.figure()
plt.imshow(A_redistributed)
plt.title('Redistributed')
song_path = r'X:\Training Data\Unprocessed\Angevil Hero II\1. John 5 - 27 Needles\song.ogg'
_, onset_times = onset_time(song_path)
print(onset_times)
# Run the decoder on 1000 randomly-labeled onsets as a smoke test
onset = [0 for _ in range(1000)]
B = make_emission_table()
sequence = onset_label(onset)
note_sequence, v, bp = viterbi(A_redistributed, B, sequence)
# print(note_sequence)
# print(note_sequence)
def onset_time_bins(onset_times):
    """Convert onset times in seconds to integer 10-millisecond bin indices."""
    # Scale to centiseconds and truncate each value toward zero
    return list(map(int, onset_times * 100))
# Convert the detected onset times to 10-ms bin indices
otb = onset_time_bins(onset_times)
print(otb)
# Display the decoded note sequence from the Viterbi run above
note_sequence
import matplotlib.pyplot as plt
# NOTE(review): duplicate re-definition of make_emission_table from earlier
# in the notebook — kept for cell-by-cell re-runnability.
def make_emission_table():
    """Build a random 32 x 12000 emission table whose columns sum to 1."""
    # Numpy array
    em_table = np.random.uniform(0,1,(32,12000))
    new_array = (em_table/em_table.sum(axis=0))
    return new_array
# NOTE(review): duplicate re-definition of onset_label from earlier in the
# notebook — kept for cell-by-cell re-runnability.
def onset_label(onset, spectrogram=None):
    '''
    Function that takes in 1D onset array and spectrogram and labels each onset with the spectrogram that most closely
    matches the emission probability table
    INPUTS: 1D Onset Array , computed spectrogram
    OUTPUTS: 1D Array of same length as onset array corresponding to column indices of emission probability table
    '''
    # X will probably need to be determined by spectrogram clusters , set locally just to highlight
    X = 12000
    return np.random.randint(0,X,len(onset))
# Quick smoke test of the decoder with random onset labels
onset = [0 for _ in range(1000)]
A = np.load('trans_prob_table.npy')
B = make_emission_table()
sequence = onset_label(onset)
# viterbi() returns (note_sequence, viterbi_table, backpointers); unpack so
# we print only the decoded sequence rather than the entire tuple.
note_sequence, v, bp = viterbi(A, B, sequence)
print(note_sequence)
plt.imshow(A)
# NOTE(review): duplicate re-definition of redistribute_trans_table from
# earlier in the notebook — kept for cell-by-cell re-runnability.
def redistribute_trans_table(A):
    """Redistribute probability mass in transition table `A` and renormalize
    each column. Uses np.random, so results depend on the RNG state.
    """
    # Columns 5+: clip the largest entry down to the second largest,
    # then renormalize the column.
    for i in range(5,A.shape[1]):
        current_col = A[:,i]
        idx = (-current_col).argsort()[:2]
        second_max_val = current_col[idx[1]]
        current_col[idx[0]] = second_max_val
        new_array = (current_col/current_col.sum(axis=0))
        A[:,i] = new_array
    # Columns 0-4: replace rows 5+ with uniform noise bounded by the column
    # maximum, then renormalize.
    for i in range(0,5):
        current_col = A[:,i]
        idx = (-current_col).argsort()[:1]
        max_val = current_col[idx[0]]
        new_array = np.zeros_like(A[5:,i])
        new_array = np.random.uniform(low=0,high=max_val,size=new_array.shape[0])
        A[5:,i] = new_array
        A[:,i] = A[:,i]/(A[:,i].sum(axis=0))
    return A
# A_fully_redistributed = redistribute_trans_table(A)
# Redistribute the transition table and inspect the result
A_redistributed = redistribute_trans_table(A)
plt.imshow(A_redistributed)
print(np.max(A_redistributed))
```
| github_jupyter |
# Two Degree-of-Freedom Caldera Model
## Introduction
Dynamical matching is an interesting chemical dynamical phenomenon that occurs in a variety of organic chemical reactions. A caldera PES arises in many organic chemical reactions, such as the vinylcyclopropane-cyclopentene rearrangement \cite{baldwin2003,gold1988}, the stereomutation of cyclopropane \cite{doubleday1997}, the degenerate rearrangement of bicyclo[3.1.0]hex-2-ene \cite{doubleday1999,doubleday2006} or that of 5-methylenebicyclo[2.1.0]pentane \cite{reyes2002}. It is characterized by a flat region or shallow minimum at its center surrounded by potential walls and multiple symmetry related index one saddle points that allow entrance and exit from this intermediate region. This shape of the potential resembles the collapsed region within an erupted volcano (caldera), and this is the reason that Doering \cite{doering2002} and co-workers refer to this type of potential as a caldera.
The manifestation of the dynamical matching phenomenon is essentially a statement of momentum conservation and Newton's first law of motion. It is observed that a trajectory entering the Caldera from a channel corresponding to a high energy index-1 saddle (reactant) experiences little force in the caldera due to the ''flatness'' of the PES, and it exits through the diametrically opposing low energy index-1 saddle (product). Consequently, this mechanism determines to a considerable extent the outcome of the chemical reaction. However, not all trajectories entering the caldera experience dynamical matching. It is observed that some trajectories may interact with the shallow potential well and become temporarily trapped in the region. This can dramatically influence the manner in which they exit from the well.
A detailed study of the trajectory behavior in a two DoF caldera PES was given in \cite{collins2014}, where a more general discussion of caldera-like PESs in organic reactions is also presented. Further work elucidating the phenomena of dynamical matching and trapping in this caldera model was carried out in \cite{ katsanikas2018,katsanikas2019}. We will describe the results in these papers in more detail when we describe the Hamiltonian model in the next section.
In this chapter we describe the phase space mechanism that controls dynamical matching. We show that dynamical matching is controlled by a heteroclinic intersection between the unstable manifold of a periodic orbit controlling entrance to the caldera and the stable manifold of a periodic orbit in the region of the shallow minimum. When a heteroclinic connection exists, trajectories that enter the caldera are transported to the shallow minimum, and they experience temporary trapping in this region. When there is no heteroclinic connection, trajectories enter and exit the caldera without interacting with the region of the central minimum. Knowledge of this phase space mechanism is significant because it allows us to predict existence, and non-existence, of dynamical matching.
This chapter is outlined as follows. In Section \ref{sec:model} we describe the Caldera PES that we use in this work, its critical points and stability, and the resulting Hamiltonian model. Section \ref{sec:DM_mech} is devoted to analyzing the phase space structures that govern dynamical matching.
## Development of the Problem
\label{sec:model}
We give a brief description of the caldera potential energy surface (Fig. \ref{fig:equi1}) and Hamiltonian as described in \cite{collins2014}. The caldera potential has a stable equilibrium point at the center, referred to as the central minimum. This potential has an axis of symmetry, the y-axis. We have also the existence of potential walls around the central minimum. On these potential walls we encounter four 1-index saddles (two for lower values of energy, referred to as the lower saddles, and two for higher values of energy, referred to as the upper saddles). In this chapter we consider the stretched version of the caldera potential:
\begin{equation*}
\label{eq1}
V(x,y)=c_1(y^2+(\lambda x)^2) + c_2y - c_3((\lambda x)^4 + y^4 - 6 (\lambda x)^2 y^2)
\end{equation*}
The potential parameters are $c_1=5$, $c_2=3$,$c_3=-3/10$ and $0<\lambda \leq 1$. The classical symmetric caldera PES \cite{collins2014,katsanikas2018} corresponds to $\lambda = 1$ and is shown in the upper left hand panel of Fig. \ref{fig:equi}.
The Hamiltonian for the system with two DoF is the sum of kinetic plus potential energy:
\begin{equation}
H(x,y,p_x,p_y) = \frac{p_x^2}{2m_x} + \frac{p_y^2}{2m_y} + V(x,y)
\label{eq2}
\end{equation}
where $V(x,y)$ is the Caldera PES in Eq. \eqref{eq1}, and $m_x$, $m_y$ are the masses of the $x$ and $y$ DoF respectively. In this work, for simplicity, we take $m_x = m_y = 1$. Hamilton's equations of motion are given by:
\begin{equation}
\begin{cases}
\dot x = \dfrac{\partial H} {\partial p_x} = \dfrac{p_x}{m_x} \\[.4cm]
\dot y = \dfrac{\partial H} {\partial p_y} = \dfrac{p_y}{m_y} \\[.4cm]
\dot p_x = -\dfrac{\partial H} {\partial x} = 2 \lambda \, (\lambda x) \left[2c_3 \left((\lambda x)^2 - 3 y^2 \right) - c_1 \right] \\[.4cm]
\dot p_y = -\dfrac {\partial H} {\partial y} = 2 y \left[ 2 c_3 \left(y^2 - 3 (\lambda x)^2\right) - c_1 \right] - c_2
\end{cases}
\label{eq3}
\end{equation}
## Revealing the Phase Space Structures
In Fig. \ref{fig:equi} we show the contours and the equilibrium points of the potential for different values of $\lambda$, for example $\lambda=0.8$, $\lambda=0.6$ and $\lambda=0.2$. Table \ref{tab:ta08} summarises the positions and energies of the upper index-1 saddles for different values of $\lambda$. We observe that the positions of the index-1 saddles move away from the center of the Caldera as we decrease the parameter $\lambda$. The position of the central minimum is $(x,y) = (0,-0.297)$ with energy $E = -0.448$ for all values of the stretching parameter $\lambda$.
<img width="560" height="315" src="figures/caldera_pes_lambda_1.png">
\label{fig:equi1}
\caption{Caldera potential energy surface for $\lambda=1$.}
<img width="560" height="315" src="figures/equi-combo.png">
\label{fig:equi}
\caption{The stable equilibrium point in the center (depicted by a black point), the upper saddles (depicted by red points), the lower saddles (depicted by blue points) and the contours of the potential for $\lambda = 1$ (upper left panel), $\lambda = 0.8$ (upper right panel), $\lambda = 0.6$ (lower left panel) and $\lambda = 0.2$ (lower right panel).}
| Equilibrium point | x | y | $\lambda$ |
|-------------------------|----------|---------|----- |
| Upper LH index-1 saddle | -2.149 | 2.0778 | 1 |
| Upper RH index-1 saddle | 2.149 | 2.0778 | 1 |
| Upper LH index-1 saddle | -2.6862 | 2.0778 | 0.8 |
| Upper RH index-1 saddle | 2.6862 | 2.0778 | 0.8 |
| Upper LH index-1 saddle | -3.5815 | 2.0778 | 0.6 |
| Upper RH index-1 saddle | 3.5815 | 2.0778 | 0.6 |
| Upper LH index-1 saddle | -10.7446 | 2.0778 | 0.2 |
| Upper RH index-1 saddle | 10.7446 | 2.0778 | 0.2 |
\label{tab:ta08}
\caption{The upper index-1 saddles of the PES in Eq. \ref{eq1} ("RH" and "LH" are the abbreviations for right hand and left hand respectively) for different values of $\lambda$. The energy for each of the cases is $E = 27.0123$.}
### Lagrangian Descriptors for revealing Phase Space Structures
In order to reveal the phase space structures that are responsible for the mechanism that allows and prevents dynamical matching, we use in this work the method of Lagrangian descriptors (LDs), see e.g. \cite{mancho2013lagrangian,lopesino2017,naik2019a}. Lagrangian descriptors is a trajectory-based scalar diagnostic that has been developed in the nonlinear dynamics literature to explore the geometrical template of phase space structures that characterizes qualitatively distinct dynamical behavior. Details on how they are applied for revealing phase space structures in caldera-like PESs are described in \cite{KGW2019,KGW2019a}. In this chapter we focus on presenting the results relevant to dynamical matching.
## Implications for Reaction Dynamics
\label{sec:DM_mech}
As we have described in the introduction, the caldera gets its name from the shape of the PES. However, transport across the caldera is a dynamical phenomenon governed by the template of geometrical structures in phase space, and dynamical matching is just one particular type of dynamical phenomenon that we are considering in this chapter. First, we describe the phase space structures that mediate transport into the caldera.
For a two DoF system, the fixed energy surface is three dimensional. For energies above that of the upper saddles an unstable periodic orbit exists in the energy surface. This is a consequence of the Lyapunov subcenter manifold theorem \cite{moser1976, weinstein1973, rabinowitz1982}. In a fixed energy surface, these periodic orbits have two dimensional stable and unstable manifolds. Trajectories move away from the periodic orbits along the direction of the unstable manifold in forward time. In the upper left panel of Fig. \ref{fig:fig_panel} we show a segment of the unstable manifold of the upper right-hand saddle directed towards the interior of the caldera.
The region of the central minimum of the caldera may also contain unstable periodic orbits. The stable manifolds of these periodic orbits direct trajectories towards the central minimum. In the upper left panel of Fig. \ref{fig:fig_panel} we show a segment of the stable manifold of an unstable periodic orbit in the region of the central minimum directed away from the central minimum.
If the stable manifold of a periodic orbit in the central minimum intersects the unstable manifolds of one of the upper saddles we have a mechanism for trajectories to enter the caldera and be directed towards the region of the central minimum. In dynamical systems terminology this is referred to as a heteroclinic connection. This would inhibit dynamical matching, as trajectories entering the caldera would exhibit (temporary) trapping in the region of the central minimum. If the heteroclinic connection breaks, as might occur if a parameter is varied, the mechanism for directing trajectories towards the regions of the central minimum no longer exists, and dynamical matching is possible. Hence, a heteroclinic bifurcation is the critical phase space structure that inhibits or allows dynamical matching, which we now present.
In order to explore the formation of a heteroclinic intersection between any stable manifold coming from an UPO of the central region of the Caldera and the unstable manifold of the UPO of the upper-right index-1 saddle, as the stretching parameter of the Caldera PES is varied, we probe the phase space structures in the following Poincare surface of section:
\begin{equation}
\mathcal{U}^{+}_{x,p_x} = \lbrace (x,y,p_x,p_y) \in \mathbb{R}^4 \;|\; y = 1.88409 \; ,\; p_y > 0 \;,\; E = 29 \rbrace
\label{psos}
\end{equation}
In the middle-left panel of Fig. \ref{fig:fig_panel}, we observe that there is a critical value of the stretching parameter ($\lambda=0.778$) for the formation of this heteroclinic connection. For values of the stretching parameter above the critical value there is no heteroclinic connection between any stable manifold coming from an UPO of the central region of the Caldera and the unstable manifold of the UPO of the upper index-1 saddle (see the upper left panel of Fig. \ref{fig:fig_panel}). The non-existence of these heteroclinic connections results in the phenomenon of dynamical matching. In this case, if we integrate an initial condition inside the region of the unstable manifold of UPO of the upper-right index-1 saddle forward and backward in time, we see in the upper right panel of Fig. \ref{fig:fig_panel} that the resulting trajectory comes from the region of the upper-right index-1 saddle and exits the caldera through the region of the opposite lower saddle without any interaction with the central area of the caldera.
Now, for values of the stretching parameter equal or above the critical value we have the formation of heteroclinic connections between the stable manifold coming from an UPO of the central region of the Caldera and the unstable manifold of the UPO of the upper-right index-1 saddle, (see middle and lower left panels of Fig. \ref{fig:fig_panel}). This heteroclinic connection destroys the dynamical matching mechanism because many trajectories become trapped inside the lobes between the two invariant manifolds. We can see this better if we choose an initial condition inside a lobe, as we illustrate in the middle and lower left panels of Fig. \ref{fig:fig_panel} and integrate it forward and backward. We observe that the resulting trajectory is temporarily trapped in the central area of the caldera before it exits from this area, see the middle and lower right panels of Fig. \ref{fig:fig_panel}.
<img width="560" height="315" src="figures/posld-combo.png">
\label{fig:fig_panel}
\caption{Phase space structures calculated on the Poincar\'e section described in Eq. \eqref{psos} located in the vicinity of the UPO of the upper-right index-1 saddle. We have similar structures in the vicinity of the UPO of the upper-left index-1 saddle because of the symmetry of the potential. We illustrate the formation of an heteroclinic connection, as the stretching parameter of the Caldera PES is varied, between a stable manifold (blue curve) of an UPO of the central region of the Caldera and the unstable manifold (red curve) of the UPO associated to the upper index-1 saddle. The first row corresponds to $\lambda = 0.8$, the second row is for the critical stretching value $\lambda = 0.778$, and for the third row we use $\lambda = 0.7$. In the second column, we capture the inhibition of dynamical matching by depicting the projection onto configuration space of the trajectory of two initial conditions marked as a yellow dot (outside the lobe - in all rows) and a yellow diamond (inside the lobe - in the second and third row). In the first row, forward and backward evolution of the trajectory are represented in black and green respectively. In the second and third row, the red line indicate the part of the trajectories at backward integration that correspond to both of them, circle and diamond. In addition, the black and blue line indicate the part of the trajectories at forward integration that correspond to the circle and diamond respectively. The magenta curve represents the energy boundary in all rows.}
# References
\bibliography{caldera2c}
| github_jupyter |
```
import numpy as np
import pandas as pd
# Binary mask flagging which voters are "amplifiers" in experiment 0
amplifiers = np.genfromtxt('amplifiers_0.csv',delimiter=',').astype(int)
print(amplifiers)
# Complement mask: non-amplifier voters
normals = 1-amplifiers
print(normals)
# Result matrices for each voting method (columns correspond to voters,
# as they are masked by the amplifier flags below)
weights_biased = np.atleast_2d(np.genfromtxt('weights-biased_0.csv', delimiter=','))
weights_unbiased = np.atleast_2d(np.genfromtxt('weights-unbiased_0.csv', delimiter=','))
condorcet_biased = np.atleast_2d(np.genfromtxt('condorcet-biased_0.csv', delimiter=','))
unanimity_biased = np.atleast_2d(np.genfromtxt('unanimity_0.csv', delimiter=','))
n_experiments = 200
# Accumulators for per-experiment mean scores, split by voter group
# (amplifiers vs non-amplifiers) and by voting method
all_weights_ub_amp_means = []
all_weights_ub_namp_means = []
all_weights_amp_means = []
all_weights_namp_means = []
all_weights_ub_means = []
all_weights_ub_stds = []
all_condorcet_amp_means = []
all_condorcet_namp_means = []
all_condorcet_means = []
all_condorcet_stds = []
all_unanimity_amp_means = []
all_unanimity_namp_means = []
for i in range(n_experiments):
    amplifiers = np.genfromtxt('amplifiers_%d.csv' %i ,delimiter=',').astype(int)
    normals = 1-amplifiers
    # Skip degenerate experiments where one of the two groups is empty
    if(sum(amplifiers) == 0 or sum(normals) == 0):
        continue
    weights_biased = np.atleast_2d(np.genfromtxt('weights-biased_%d.csv' % i, delimiter=','))
    #print("----- WEIGHTS ")
    #print("----- amplifiers")
    #print(weights_biased[:,amplifiers.astype(bool)])
    #print(weights_biased[:,amplifiers.astype(bool)].mean())
    all_weights_amp_means += [weights_biased[:,amplifiers.astype(bool)].mean()]
    #print("----- non amplifiers")
    #print(weights_biased[:,normals.astype(bool)])
    #print(weights_biased[:,normals.astype(bool)].mean())
    all_weights_namp_means += [weights_biased[:,normals.astype(bool)].mean()]
    weights_unbiased = np.atleast_2d(np.genfromtxt('weights-unbiased_%d.csv' % i, delimiter=','))
    all_weights_ub_amp_means += [weights_unbiased[:,amplifiers.astype(bool)].mean()]
    all_weights_ub_namp_means += [weights_unbiased[:,normals.astype(bool)].mean()]
    # Overall mean/std for the unbiased-weights method
    all_weights_ub_means += [weights_unbiased.mean()]
    all_weights_ub_stds += [weights_unbiased.std()]
    condorcet_biased = np.atleast_2d(np.genfromtxt('condorcet-biased_%d.csv' % i, delimiter=','))
    #print("----- CONDORCET ")
    #print("----- amplifiers")
    #print(condorcet_biased[:,amplifiers.astype(bool)])
    #print(condorcet_biased[:,amplifiers.astype(bool)].mean())
    all_condorcet_amp_means += [condorcet_biased[:,amplifiers.astype(bool)].mean()]
    #print("----- non amplifiers")
    #print(condorcet_biased[:,normals.astype(bool)])
    #print(condorcet_biased[:,normals.astype(bool)].mean())
    all_condorcet_namp_means += [condorcet_biased[:,normals.astype(bool)].mean()]
    # Overall mean/std for the Condorcet method
    all_condorcet_means += [condorcet_biased.mean()]
    all_condorcet_stds += [condorcet_biased.std()]
    unanimity_biased = np.atleast_2d(np.genfromtxt('unanimity_%d.csv' % i, delimiter=','))
    all_unanimity_amp_means += [unanimity_biased[:,amplifiers.astype(bool)].mean()]
    all_unanimity_namp_means += [unanimity_biased[:,normals.astype(bool)].mean()]
import matplotlib.pyplot as plt
# LaTeX text rendering with Computer Modern, to match the paper's typography
plt.rc('text', usetex=True)
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'CMR10'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 14
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 13
plt.rcParams['ytick.labelsize'] = 13
plt.rcParams['legend.fontsize'] = 13
plt.rcParams['figure.titlesize'] = 12
# Institutional color palette
isseorange = (1.0, 0.57647, 0.039216)
#\definecolor{issegrey}{RGB}{80,85,82}
issegrey = (80.0 / 255, 85.0 / 255, 82.0 / 255)
# Credit: Josh Hemann
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from collections import namedtuple
# Grouped bar chart comparing voter-group satisfaction across the four
# voting methods (adapted from the matplotlib grouped-bar example).
n_groups = 4 # weighted and condorcet
# within each group amplifiers and non-amplifiers correspond to men and women
# (naming leftover from the matplotlib example this is based on)
all_weights_ub_amp_means = np.array(all_weights_ub_amp_means)
all_weights_ub_namp_means = np.array(all_weights_ub_namp_means)
all_weights_amp_means = np.array(all_weights_amp_means)
all_weights_namp_means = np.array(all_weights_namp_means)
all_weights_ub_means = np.array(all_weights_ub_means)
all_weights_ub_stds = np.array(all_weights_ub_stds)
all_condorcet_amp_means = np.array(all_condorcet_amp_means)
all_condorcet_namp_means = np.array(all_condorcet_namp_means)
all_condorcet_means = np.array(all_condorcet_means)
all_condorcet_stds = np.array(all_condorcet_stds)
all_unanimity_amp_means = np.array(all_unanimity_amp_means)
all_unanimity_namp_means = np.array(all_unanimity_namp_means)
# Per-method mean/std of the per-experiment means, for each voter group
means_amp = (all_weights_ub_amp_means.mean(), all_weights_amp_means.mean(), all_condorcet_amp_means.mean(), all_unanimity_amp_means.mean())
std_amp = (all_weights_ub_amp_means.std(), all_weights_amp_means.std(), all_condorcet_amp_means.std(), all_unanimity_amp_means.std())
means_namp = (all_weights_ub_namp_means.mean(), all_weights_namp_means.mean(), all_condorcet_namp_means.mean(), all_unanimity_namp_means.mean())
std_namp = (all_weights_ub_namp_means.std(), all_weights_namp_means.std(), all_condorcet_namp_means.std(), all_unanimity_namp_means.std())
fig, ax = plt.subplots()
# Size the figure for a single journal column
column_width = 4.803 # in inches
fig_width = column_width * 1.25
fig_height = fig_width / 1.618
# Invert the scale: plotted "satisfaction degrees" are 3 minus the raw means
means_amp = 3. - np.array(means_amp)
means_namp = 3. - np.array(means_namp)
index = np.arange(n_groups)
bar_width = 0.23
opacity = 0.85
error_config = {'ecolor': '0.3'}
rects1 = ax.bar(index, means_amp, bar_width,
                alpha=opacity, color=isseorange,hatch="/",
                yerr=std_amp, error_kw=error_config,
                label='Amplifiers')
rects2 = ax.bar(index + bar_width, means_namp, bar_width,
                alpha=opacity, color=issegrey,hatch="\\",
                yerr=std_namp, error_kw=error_config,
                label='Non-amplifiers')
ax.set_xlabel(r'\textbf{Voting Method}')
ax.set_ylabel(r'\textbf{Satisfaction degrees}')
ax.set_title('Mentor Matching: 25 \% Amplifiers')
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(('WCSP Unbiased', 'WCSP Biased', 'Condorcet', 'Pareto'))
plt.ylim([0,2.5])
ax.legend(loc=0)
fig.tight_layout()
fig.set_size_inches(fig_width, fig_height)
plt.savefig("mentor-matching-comparison-025.pdf")
plt.show()
# Recover the raw (non-inverted) amplifier means
3 - means_amp
# Spread of per-experiment std-devs: unbiased weights vs Condorcet
print(np.array(all_weights_ub_stds).mean(), np.array(all_weights_ub_stds).std())
print(np.array(all_condorcet_stds).mean(), np.array(all_condorcet_stds).std())
plt.hist(all_weights_ub_stds, bins=np.linspace(0.5,2, 50))
plt.hist(all_condorcet_stds, color='r', bins=np.linspace(0.5,2, 50))
# Quick Shapiro-Wilk normality check on the WCSP-unbiased standard deviations.
from scipy import stats

shapiro_stat, p = stats.shapiro(all_weights_ub_stds)
alpha = 1e-3  # significance threshold
print("p = {:g}".format(p))
# Null hypothesis: the sample was drawn from a normal distribution.
if p >= alpha:
    print("For weights, the null hypothesis cannot be rejected")
else:
    print("For weights, the null hypothesis can be rejected")
# Quick Shapiro-Wilk normality check on the Condorcet standard deviations.
from scipy import stats

shapiro_stat, p = stats.shapiro(all_condorcet_stds)
alpha = 1e-3  # significance threshold
print("p = {:g}".format(p))
# Null hypothesis: the sample was drawn from a normal distribution.
if p >= alpha:
    print("For condorcet, the null hypothesis cannot be rejected")
else:
    print("For condorcet, the null hypothesis can be rejected")
# Two-sample t-test: WCSP-unbiased vs Condorcet standard deviations.
import scipy.stats as st

t, prob = st.ttest_ind(all_weights_ub_stds, all_condorcet_stds)
print(t, prob)
# 5% significance level.
if prob >= 0.05:
    print("INSIGNIFICANT")
else:
    print("SIGNIFICANT")
# Rounded summary numbers: std-devs, then mean satisfaction (3 - mean).
# NOTE(review): these call .mean()/.std() directly, so all_*_stds and
# all_*_means must already be numpy arrays here — earlier cells wrap them in
# np.array() first, which suggests they may be plain lists; confirm upstream.
print(np.round(all_weights_ub_stds.mean(), 2))
print(np.round(all_weights_ub_stds.std(), 2))
print(np.round(all_condorcet_stds.mean(), 2))
print(np.round(all_condorcet_stds.std(), 2))
print(np.round( (3. - all_weights_ub_means).mean(), 2))
print(np.round((3. - all_weights_ub_means).std(), 2))
print(np.round( (3. - all_condorcet_means).mean(), 2))
print(np.round((3. - all_condorcet_means).std(), 2))
```
| github_jupyter |
# Example: CanvasXpress scatter2d Chart No. 4
This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/scatter2d-4.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
Everything required for the chart to render is included in the code below. Simply run the code block.
```
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="scatter2d4",
data={
"y": {
"vars": [
"s1",
"s2",
"s3",
"s4",
"s5",
"s6",
"s7",
"s8",
"s9",
"s10",
"s11",
"s12",
"s13",
"s14",
"s15",
"s16",
"s17",
"s18",
"s19",
"s20",
"s21"
],
"smps": [
"U-Trial 1",
"U-Trial 2",
"U-Trial 3",
"S-Trial 1",
"S-Trial 2",
"S-Trial 3"
],
"data": [
[
38.4,
27.7,
25.7,
53.1,
30.6,
30.2
],
[
46.2,
57.2,
41.9,
54.7,
43.3,
56.7
],
[
72.5,
57.9,
51.9,
74.2,
53.4,
42.4
],
[
38,
38,
32.2,
49.6,
37.4,
34.4
],
[
82.8,
57.9,
64.7,
53.6,
48.6,
44.8
],
[
33.9,
32,
31.4,
51.3,
35.5,
42.9
],
[
50.4,
40.6,
40.1,
44.1,
46.9,
42.7
],
[
35,
33.1,
43.2,
34,
26.4,
24.8
],
[
32.8,
26.8,
33.9,
34.5,
25.1,
25.1
],
[
60.1,
53.2,
40.4,
59.1,
87.1,
59.2
],
[
75.1,
63.1,
58,
67.3,
43.8,
42.2
],
[
57.6,
57.7,
61.5,
75.5,
126.6,
48.4
],
[
55.5,
63.3,
44.6,
41.1,
41.8,
32
],
[
49.5,
45.8,
35.3,
52.2,
53.8,
48.1
],
[
40.9,
35.7,
37.2,
28.3,
26,
33.7
],
[
44.3,
46.8,
39.4,
74.9,
45.3,
42.6
],
[
93.8,
91.9,
77.4,
77.5,
55.8,
54.9
],
[
47.9,
59.9,
52.8,
50.9,
58.6,
64.5
],
[
75.2,
54.1,
63.6,
70.1,
44,
43.1
],
[
46.2,
39.3,
56.6,
60.3,
47.8,
52.8
],
[
56.3,
45.8,
58.9,
59.9,
36.8,
44.3
]
]
},
"m": {
"Name": "Scents",
"Description": "Data on the time subjects required to complete a pencil and paper maze when they were smelling a floral scent and when they were not.",
"Reference": "Hirsch, A. R., and Johnston, L. H. Odors and Learning, Smell & Taste Treatment and Research Foundation, Chicago."
},
"z": {
"Sex": [
"M",
"F",
"M",
"M",
"M",
"F",
"F",
"F",
"M",
"F",
"F",
"F",
"F",
"M",
"M",
"M",
"M",
"M",
"F",
"F",
"M"
],
"Smoker": [
"N",
"Y",
"N",
"N",
"N",
"Y",
"N",
"N",
"N",
"N",
"Y",
"Y",
"Y",
"Y",
"N",
"N",
"Y",
"N",
"Y",
"N",
"N"
],
"Opinion": [
"pos",
"neg",
"pos",
"neg",
"neg",
"pos",
"pos",
"pos",
"pos",
"indiff",
"pos",
"indiff",
"pos",
"indiff",
"indiff",
"pos",
"neg",
"neg",
"pos",
"neg",
"neg"
],
"Age": [
23,
43,
43,
32,
15,
37,
26,
35,
26,
31,
35,
55,
25,
39,
25,
26,
33,
62,
54,
38,
65
],
"Order": [
1,
2,
1,
2,
1,
2,
1,
2,
1,
2,
1,
2,
1,
2,
1,
2,
1,
2,
1,
2,
1
]
}
},
config={
"citation": "Hirsch, A. R., and Johnston, L. H. Odors and Learning, Smell & Taste Treatment and Research Foundation, Chicago.",
"graphType": "Scatter2D",
"histogramStat": "count",
"legendBox": True,
"setMaxX": 100,
"setMaxY": 150,
"setMinX": 0,
"setMinY": 0,
"shapeBy": "Smoker",
"showTransition": False,
"sizeBy": "Age",
"theme": "CanvasXpress",
"title": "Data on the time subjects required to complete a pencil and paper mazewhen they were smelling a floral scent and when they were not.",
"xAxis": [
"U-Trial 1",
"U-Trial 2",
"U-Trial 3"
],
"xAxisExact": True,
"xAxisHistogramShow": True,
"yAxis": [
"S-Trial 1",
"S-Trial 2",
"S-Trial 3"
],
"yAxisExact": True,
"yAxisHistogramShow": True
},
width=613,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="scatter2d_4.html")
```
| github_jupyter |
# MNIST
```
import torch
from torch import nn, optim
from torchvision import datasets, transforms
import numpy as np
%matplotlib inline
from matplotlib import pyplot as plt
```
### Description
Classification of hand-written digits (MNIST dataset) using a simple multi-layer perceptron architecture implemented in PyTorch with the `nn.Sequential` module.
### Content
1. Loading a training dataset
1. Apply transformations
2. Creating a data loader
2. Defining a sequential neural network
3. Training a neural network
## Data Set
### MNIST
The MNIST data set is a large database of handwritten digits, often used as toy model to validate machine learning and deep learning algorithms in image recognition and classification.
### Loading the Data Set
Let's start by loading the MNIST dataset. Since we want to use the data (both images and associated labels) with PyTorch we have to load the dataset into `torch.tensor`s. Additionally we want our data to be normalised in order to avoid large variations in the data; we can compose the two transformations and apply them directly when loading the dataset.
```
# Preprocessing applied to every MNIST image on load.
transform = transforms.Compose(
    [
        transforms.ToTensor(), # Array to PyTorch tensor
        transforms.Normalize((0.5,), (0.5,)), # Normalisation (mean and std)
    ]
)
# Download (if not already present) the MNIST training split into ./data.
trainset = datasets.MNIST('data', download=True, train=True, transform=transform)
```
*Note*: some `torchvision.transforms` apply directly to the loaded image and therefore go before `transforms.ToTensor()` while other `torchvision.transforms` are applied directly to `torch.tensors` and therefore go after `transforms.ToTensor()`.
Once the dataset is loaded and transformed, we can define a `DataLoader`. This is an utility class that allows to split the dataset in *minibatches*, used for training.
```
# Minibatch the training set: 64 shuffled images per step; drop_last discards
# the final partial batch so every batch has exactly 64 samples.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, drop_last=True)
```
### Visualizing Images and Labels
Let's look at the first batch of images:
```
dataiter = iter(trainloader) # Create an iterator from trainloader
images, labels = next(dataiter)
fig = plt.figure(figsize=(8,8))
# Display the whole 64-image batch on an 8x8 grid, with the label as title.
for idx in range(64):
    ax = fig.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
    # Un-normalize image (inverse of Normalize((0.5,), (0.5,)) applied on load)
    img = images[idx].numpy().squeeze() * 0.5 + 0.5
    plt.imshow(img, cmap='Greys')
    ax.set_title(labels[idx].item(), fontdict={"fontsize": 12})
plt.tight_layout()
plt.show()
```
*Note*: Images have been normalised when loading the dataset. In order to visualise the original images we have to (manually) undo this transformation.
## Sequential Neural Network with PyTorch
### Define a Sequential NN
PyTorch allows to define simple (sequential) NN architectures very easily using `nn.Sequential`. `nn.Sequential` takes a list of layers and automatically build a sequential NN. The following architecture defines a multi-layer perceptron (MLP) with an input layer, a hidden layer and an output layer and using ReLU activation functions between layers:
```
# Multi-layer perceptron: flattened 28x28 image (784) -> 256 -> 64 -> 10 logits,
# with ReLU activations between the linear layers.
_mlp_layers = [
    nn.Linear(784, 256),
    nn.ReLU(),
    nn.Linear(256, 64),
    nn.ReLU(),
    nn.Linear(64, 10),
]
model = nn.Sequential(*_mlp_layers)
```
Printing the model will show its composition:
```
# Show the layer composition of the sequential model.
print(model)
```
### Test Forward Pass
In order to check that the model is defined correctly we can perfom a forward pass with a batch of images. We can also visualize how the model performs before training by plotting predicting class probabilities:
```
images, labels = next(dataiter)
def showclassp(images, labels, model):
    """
    Plot class probabilities for a batch of images and labels.

    Draws an 8x8 grid (one subplot per image, so the batch is assumed to hold
    64 samples) with one bar per digit class: the true class is green when it
    is also the argmax prediction, red otherwise; all other classes are black.
    """
    # Define a figure
    fig = plt.figure(figsize=(12,12))
    # Flatten image for forward pass
    images = images.view(images.shape[0], -1)
    # Compute predictions
    with torch.no_grad(): # Do not track gradients
        # Perform forward pass
        out = model(images)
        # Convert raw logits to class probabilities
        p = nn.functional.softmax(out, dim=1).numpy()
    # Loop over images and labels in a batch
    for idx in range(64):
        # Create subplot
        ax = fig.add_subplot(8, 8, idx + 1, xticks=range(10), yticks=[0, 1])
        # Plot all class probabilities for given image
        for i in range(10):
            if labels[idx] == i:
                # True class: colour by whether the prediction is correct
                if labels[idx] == np.argmax(p[idx]):
                    plt.bar(i, p[idx,i], color="g")
                else:
                    plt.bar(i, p[idx,i], color="r")
            else:
                plt.bar(i, p[idx,i], color="k")
        plt.ylim([0,1.25])
        ax.set_title(labels[idx].item(), fontdict={"fontsize": 12})
    plt.tight_layout()
    plt.show()
showclassp(images, labels, model)
```
The graph shows the predicted probability for each class (before training). Green bars represent a correct classification while red bars represent an incorrect classification. Since the weights of the network are initialised at random, every class has a similar probability, close to $\frac{1}{10}$.
### Training
To train our model we need to define a loss function. For this multi-class classification problem we can use the cross-entropy loss:
```
# Multi-class loss on raw logits (internally LogSoftmax + NLLLoss).
cross_entropy_loss = nn.CrossEntropyLoss()
```
*Note*: the `nn.CrossEntropyLoss()` loss function composes `nn.LogSoftmax()` and `nn.NLLLoss()`. If the model outputs raw values `nn.CrossEntropyLoss()` should be used, while if the model output `nn.LogSoftmax()` then `nn.NLLLoss()` should be used as loss function.
In order to update the weights of our network we also need to define an optimiser. Here we use the simple stochastic gradient descent (SGD) optimiser:
```
# Learning rate
learning_rate = 0.003
# Define the optimizer (plain SGD, no momentum) and link it with the model
# parameters it will update.
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
```
Finally we can train our model by looping over the batches and updating the weights of our network using backpropagation. The backpropagation algorithm works as follows:
1. Initialize gradients to zero
2. Perform a forward pass
3. Compute the loss
4. Perform backpropagation to compute the gradients
5. Update the paramerters of the model (weights)
```
# Define the number of training epochs
epochs = 10
# Loop over epochs
for epoch in range(epochs):
    # Initialize total epoch loss
    epoch_loss = 0
    # Loop over batches
    for images, labels in trainloader:
        # Flatten input image to (batch, 784) for the MLP
        images = images.view(images.shape[0], -1)
        # Initialize the gradients to zero
        optimizer.zero_grad()
        # Perform forward pass
        output = model(images)
        # Compute the loss
        loss = cross_entropy_loss(output, labels)
        # Perform backpropagation
        loss.backward()
        # Update the weights
        optimizer.step()
        # Accumulate total epoch loss
        epoch_loss += loss.item()
    else:
        # for/else: runs once the batch loop completes (no break is used);
        # report the mean per-batch loss for this epoch.
        print(f"Loss #{epoch+1}: {epoch_loss/len(trainloader)}")
```
Finally, we can plot class probabilities after training for a single batch:
```
# Re-plot class probabilities on a fresh batch after training.
images, labels = next(dataiter)
showclassp(images, labels, model)
```
We see that after training the model gives reasonable results (compared to the class probabilities obtained before training).
## Comments
Here we visualised the model predictions on one batch of the training dataset. This is not a fair evaluation of our model performance since this batch has been used multiple times to change the model weights. NNs are known to easily overfit the training data and therefore one has to use regularization techniques (such as *dropouts*, *early stopping*, ...) and validate the model on a different test set.
| github_jupyter |
# MLflow Training Tutorial
This `train.ipynb` Jupyter notebook is an example of using Elasticsearch data together with MLflow.
> This is the Jupyter notebook version of the `train.py` example
```
from sklearn.svm import OneClassSVM
# Elasticsearch connection and query configuration.
ES_URL = "http://192.168.122.3:9200"
ES_INDEX = "logs-endpoint-winevent-sysmon-*"
COLUMNS = ["agent.hostname", "event.code"]
# Rows missing any of these columns are dropped after loading.
DROP_NA_COLUMNS = COLUMNS
# Estimator class instantiated by build_pipeline (anomaly detector).
MODEL = OneClassSVM
# imports
import pandas as pd
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
import functools
import csv
from os.path import isfile as isfile
def get_data(elast_url, index, columns):
    """Return a DataFrame of `columns` for the documents in `index`.

    Results are cached in a local CSV whose name is a SHA-1 digest of the
    connection URL, index pattern and column list; the cache is only built
    when that file does not exist yet. Rows missing any DROP_NA_COLUMNS
    value are removed before returning.
    """
    import hashlib  # fixed: hashlib was used below but never imported

    def save_to_csv(elast_url, index, columns, file_name):
        # Stream matching documents out of Elasticsearch into a CSV cache.
        print("saving to csv as file did not exist")
        es = Elasticsearch(elast_url,timeout=600)
        s = Search(using=es, index=index).query().source(fields=columns)
        with open(file_name, mode='w') as es_fd:
            writer = csv.DictWriter(es_fd, fieldnames=columns)
            writer.writeheader()
            for hit in s.scan():
                # handles nested objects in response because of multilevel keys (i.e. agent.hostname)
                def rgetattr(obj, attr):
                    def _getattr(obj, attr):
                        return getattr(obj, attr)
                    return functools.reduce(_getattr, [obj] + attr.split('.'))
                hit_dict = {column: rgetattr(hit, column) for column in columns}
                writer.writerow(hit_dict)
                # TODO remove in production: only the first hit is exported
                break

    def read_from_csv(csv_file):
        return pd.read_csv(csv_file)

    # Cache key: lengths + values of every query parameter.
    # fixed: the format string had five {} placeholders for six arguments, so
    # the joined column list was silently dropped from the key (str.format
    # ignores extra positional args). Existing cache files will be rebuilt.
    file_name_clear = ("{}{}{}{}{}{}"
                       .format(
                           len(elast_url),
                           elast_url,
                           len(index),
                           index,
                           len(columns),
                           ".".join(columns)))
    file_name = (str(hashlib.sha1(file_name_clear.encode("UTF-8")).hexdigest()[:10]) + ".csv")
    print("filename: {}, filename_clear: {}".format(file_name,file_name_clear))
    if not isfile(file_name):
        save_to_csv(elast_url, index, columns, file_name)
    data_frame = read_from_csv(file_name)
    if len(DROP_NA_COLUMNS) > 0:
        # fixed: dropna returns a new frame; the original discarded it (no-op)
        data_frame = data_frame.dropna(subset=DROP_NA_COLUMNS, how="any")
    return data_frame[columns]
from sklearn.svm import OneClassSVM
from sklearn.pipeline import Pipeline
import mlflow
import mlflow.sklearn
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
def build_pipeline(data, *params):
    """Assemble preprocessing + estimator pipeline for `data`.

    Numeric columns are median-imputed and standard-scaled; object columns
    are constant-imputed ('missing') and one-hot encoded; the final step is
    an instance of the module-level MODEL class. `params` is accepted for
    interface compatibility but unused.
    """
    np.random.seed(40)
    # Column selection by dtype.
    num_cols = data.select_dtypes(include=['int64', 'float64']).columns
    cat_cols = data.select_dtypes(include=['object']).columns
    # Per-dtype preprocessing sub-pipelines.
    num_steps = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())])
    cat_steps = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
        ('onehot', OneHotEncoder(handle_unknown='ignore'))])
    column_prep = ColumnTransformer(
        transformers=[
            ('num', num_steps, num_cols),
            ('cat', cat_steps, cat_cols)])
    # Preprocessing followed by the configured estimator.
    return Pipeline([('preprocessor', column_prep),
                     ('svc', MODEL())])
from collections import Counter
def log_output(pipe,data):
    """Log the fitted pipeline, its estimator params, and per-label
    prediction counts to the active MLflow run."""
    mlflow.sklearn.log_model(pipe, "model")
    # pipe.steps[-1][1] is the final estimator in the pipeline
    params = pipe.steps[-1][1].get_params()
    mlflow.log_param("model_param", params)
    predictions = pipe.predict(data)
    # One metric per predicted label (for OneClassSVM these are +1/-1,
    # i.e. inliers/outliers)
    for k,v in Counter(predictions).items():
        mlflow.log_metric("pred_{}".format(k), v)
import logging
import warnings
def train(*params):
    """Fetch data, fit the pipeline inside an MLflow run, log model and
    metrics, and return the fitted pipeline. `params` is currently unused."""
    # setup logging
    logging.basicConfig(level=logging.WARN)
    logger = logging.getLogger(__name__)  # NOTE(review): unused below
    np.random.seed(40)
    elast_url = ES_URL
    index = ES_INDEX
    data = get_data(elast_url, index, columns=COLUMNS)
    # Everything inside this context is attached to one MLflow run
    with mlflow.start_run():
        pipe = build_pipeline(data)
        pipe.fit(data)
        log_output(pipe,data)
    return pipe
pipe = train()
# Bare expression: displays the fitted pipeline in the notebook output
pipe
```
| github_jupyter |
## Baysian Ridge and Lasso regression
```
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
import pymc
import sys
%matplotlib inline
# Synthetic regression data: x2 is (almost) exactly -x1 (tiny 1e-3 noise), so
# the 10*x1 + 10*x2 terms in y nearly cancel; x3 is independent. y is
# noise-free given X.
n = 10000
x1 = norm.rvs(0, 1, size=n) + norm.rvs(0, 10**-3, size=n)
x2 = -x1 + norm.rvs(0, 10**-3, size=n)
x3 = norm.rvs(0, 1, size=n)
X = np.column_stack([x1, x2, x3])
y = 10 * x1 + 10 * x2 + 0.1 * x3
```
### Exercise: implement ordinary least squares
$$ \beta = (X^{T}X)^{-1}X^{T}y $$
```
#your code here
# Normal equations: beta = (X^T X)^{-1} X^T y
np.linalg.inv(X.T @ X) @ X.T @ y
```
### Bayesian ordinary least squares
```
# Very wide uniform priors on the three coefficients (effectively flat).
beta_min = -10**6
beta_max = 10**6
beta1_ols = pymc.Uniform(name='beta1', lower=beta_min, upper=beta_max)
beta2_ols = pymc.Uniform(name='beta2', lower=beta_min, upper=beta_max)
beta3_ols = pymc.Uniform(name='beta3', lower=beta_min, upper=beta_max)
# Deterministic linear predictor built from the coefficient variables.
@pymc.deterministic
def y_hat_ols(beta1=beta1_ols, beta2=beta2_ols, beta3=beta3_ols, x1=x1, x2=x2, x3=x3):
    return beta1 * x1 + beta2 * x2 + beta3 * x3
# Gaussian likelihood around the predictor; observed=True pins it to y.
Y_ols = pymc.Normal(name='Y', mu=y_hat_ols, tau=1.0, value=y, observed=True)
ols_model = pymc.Model([Y_ols, beta1_ols, beta2_ols, beta3_ols])
# Maximum-a-posteriori fit via L-BFGS-B.
ols_map = pymc.MAP(ols_model)
ols_map.fit(method='fmin_l_bfgs_b', iterlim=100000, tol=.0001)
def get_coefficients(map_):
    """Return [{'beta...': value}, ...] for every beta* variable of a fitted pymc MAP."""
    return [{str(variable): variable.value} for variable in map_.variables if str(variable).startswith('beta')]
get_coefficients(ols_map)
```
### Exercise: bayesian ridge regression
Note that Bayesian ridge regression is constructed by assuming a normal prior on the betas; use $\tau=1.0$, $\mu=0$.
```
#your code here
# Ridge regression = Gaussian (Normal) prior on each coefficient.
tau = 1.0
beta1_ridge = pymc.Normal('beta1', mu=0, tau=tau)
beta2_ridge = pymc.Normal('beta2', mu=0, tau=tau)
beta3_ridge = pymc.Normal('beta3', mu=0, tau=tau)
# Same deterministic linear predictor as the OLS model.
@pymc.deterministic
def y_hat_ridge(beta1=beta1_ridge, beta2=beta2_ridge, beta3=beta3_ridge, x1=x1, x2=x2, x3=x3):
    return beta1 * x1 + beta2 * x2 + beta3 * x3
Y_ridge = pymc.Normal('Y', mu=y_hat_ridge, tau=1.0, value=y, observed=True)
ridge_model = pymc.Model([Y_ridge, beta1_ridge, beta2_ridge, beta3_ridge])
# MAP fit of the ridge model.
ridge_map = pymc.MAP(ridge_model)
ridge_map.fit(method='fmin_l_bfgs_b', iterlim=1000, tol=.0001)
```
### compare to scikit learn
```
from sklearn.linear_model import RidgeCV
# Cross-validated ridge baseline; no intercept, matching the Bayesian model.
skl_ridge_model = RidgeCV(fit_intercept=False)
skl_ridge_model.fit(X, y)
print ("scikit results")
print (skl_ridge_model.coef_)
print ("bayesian results")
print (get_coefficients(ridge_map))
```
### Exercise: bayesian lasso regression
Note that Bayesian lasso regression is constructed by assuming a Laplace prior on the betas; use $\tau=1.0 \times \sqrt{2\sigma}$, $\mu=0$.
```
# Lasso regression = Laplace (double-exponential) prior on each coefficient.
sigma = 1.0e1
b = np.sqrt(2.0 * sigma)
beta1_lasso = pymc.Laplace('beta1', mu=0, tau=1.0 * b)
beta2_lasso = pymc.Laplace('beta2', mu=0, tau=1.0 * b)
beta3_lasso = pymc.Laplace('beta3', mu=0, tau=1.0 * b)
# Same deterministic linear predictor as the previous models.
@pymc.deterministic
def y_hat_lasso(beta1=beta1_lasso, beta2=beta2_lasso, beta3=beta3_lasso, x1=x1, x2=x2, x3=x3):
    return beta1 * x1 + beta2 * x2 + beta3 * x3
Y_lasso = pymc.Normal('Y', mu=y_hat_lasso, tau=1.0, value=y, observed=True)
lasso_model = pymc.Model([Y_lasso, beta1_lasso, beta2_lasso, beta3_lasso])
# MAP fit of the lasso model.
lasso_map = pymc.MAP(lasso_model)
lasso_map.fit(method='fmin_l_bfgs_b', iterlim=10000, tol=.0001)
```
### Compare to scikit learn
```
from sklearn.linear_model import LassoCV
# Cross-validated lasso baseline; no intercept, matching the Bayesian model.
skl_lasso_model = LassoCV(fit_intercept=False)
skl_lasso_model.fit(X, y)
print ("Scikit results")
print (skl_lasso_model.coef_)
print ("Bayesian results")
# Bare expression: only displayed in a notebook's cell output.
get_coefficients(lasso_map)
```
### Ridge and Lasso on Boston dataset
```
from sklearn.datasets import load_boston
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2; this cell requires an older sklearn — confirm the pinned version.
boston = load_boston()
(n, d)= boston.data.shape
print (n, d)
#lasso
# Laplace prior over the full coefficient vector -> Bayesian lasso on Boston.
sigma = 1.0e3
# fixed NameError: this line previously read `sigma2`, which is never defined
# anywhere in the notebook (the line above defines `sigma`).
b = np.sqrt(2.0 * sigma)
beta_lasso = pymc.Laplace('beta', mu=0, tau=1.0 * b, size=(d,1))
@pymc.deterministic
def y_hat_lasso(beta=beta_lasso, x=boston.data):
    # Linear predictor X @ beta, transposed to match boston.target's shape.
    return np.dot(x, beta).T
Y_lasso = pymc.Normal('Y', mu=y_hat_lasso, tau=1.0, value=boston.target, observed=True)
lasso_model = pymc.Model([Y_lasso, beta_lasso])
lasso_map = pymc.MAP(lasso_model)
lasso_map.fit(method='fmin_l_bfgs_b', iterlim=1000, tol=.0001)
# scikit-learn baseline for comparison.
skl_lasso_model = LassoCV(fit_intercept=False)
skl_lasso_model.fit(boston.data, boston.target)
#Ridge
# Gaussian prior over the full coefficient vector -> Bayesian ridge on Boston.
beta_ridge = pymc.Normal('beta', mu=0, tau=1.0, size=(d,1))
@pymc.deterministic
def y_hat_ridge(beta=beta_ridge, x=boston.data):
    # Linear predictor X @ beta, transposed to match boston.target's shape.
    return np.dot(x, beta).T
Y_ridge = pymc.Normal('Y', mu=y_hat_ridge, tau=1.0, value=boston.target, observed=True)
ridge_model = pymc.Model([Y_ridge, beta_ridge])
ridge_map = pymc.MAP(ridge_model)
ridge_map.fit(method='fmin_l_bfgs_b', iterlim=1000, tol=.0001)
# scikit-learn baseline for comparison.
skl_ridge_model = RidgeCV(fit_intercept=False)
skl_ridge_model.fit(boston.data, boston.target)
# Bare expression: only displayed in a notebook's cell output.
skl_ridge_model.coef_
print ("Scikit lasso results")
# NOTE(review): the two bare expressions below are evaluated and discarded in
# a script; in a notebook only the last expression of a cell is displayed, so
# these likely do not show as intended — consider wrapping them in print().
skl_lasso_model.coef_
print ("Bayesian lasso results")
get_coefficients(lasso_map)[0]['beta'][:, 0]
print ("Scikit Ridge results")
print(skl_ridge_model.coef_)
print ("Bayesian Ridge results")
print (get_coefficients(ridge_map)[0]['beta'])
```
| github_jupyter |

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/CombinedLogLaw/combined-log-law.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
<h1><center>Logarithmic Laws</center></h1>
```
%%html
<script>
// Toggle visibility of every notebook input (code) cell.
function code_toggle() {
  if (code_shown){
    $('div.input').hide('500');
    $('#toggleButton').val('Show Code')
  } else {
    $('div.input').show('500');
    $('#toggleButton').val('Hide Code')
  }
  code_shown = !code_shown
}
// Start with code hidden once the page is ready.
$( document ).ready(function(){
  code_shown=false;
  $('div.input').hide()
});
</script>
<!-- fixed: the paragraph was closed with an invalid `</>` tag -->
<p> Code is hidden for ease of viewing. Click the Show/Hide button to see. </p>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
```
# Combined Log Law
## Introduction:
Logarithms are the inverse operation to exponentials. They are useful because they change the arithmetic operations of multiplication, division, and powers into addition, subtraction, and products. While modern calculators can do all these operations quickly, for us mere humans logarithms can be useful for doing quick approximations in our heads.
## Motivation:
Going about our day, we often run into powers of ten, when we see kilograms of food in a grocery store (1000 grams), megawatts of power from an electrical generator (1,000,000 watts) or gigabytes of memory in our computer (1,000,000,000 bytes). It is the **power** of ten that is important, and the logarithm captures this idea with the formulas
$$
\log(10) = 1
$$
$$
\log(1000) = 3
$$
$$
\log(1,000,000) =
6$$
$$
\log(1,000,000,000) = 9.
$$
The logarithm of a number $x$ is defined as the power $n$ it takes so that $x = 10^n.$ So, for instance, since $1000 = 10^3$, we know that $\log(1000) = 3,$ as indicated in the list above.
For numbers that aren't integer powers of 10, the logarithm is still defined by the above formula, where $n$ can be any real number solving $x = 10^n$. For instance, you might guess that $\log(5000)$ is somewhere between 3 and 4, since the number 5000 is halfway between $10^3 = 1000$ and $10^4 = 10,000$. You might even guess that $\log(5000) = 3.5$, which is not a bad approximation: in fact, a calculator shows that
$$
\log(5000) = 3.69897...,
$$
which is the same as saying
$$
5000 = 10^{3.69897...}.
$$
We can also take logarithms of small numbers, like this:
$$
\log(0.01) = \log\left(\frac{1}{100}\right) = \log(10^{-2}) = -2.
$$
But you cannot take logarithms of negative numbers. (Unless you are willing to learn about something called complex numbers!)
## Base for logarithm:
In the examples above, we worked with powers of ten, so ten is called the **base** for the logarithm.
We can work with other bases. For instance, with computers we often work with power of two. A KB of memory is actually $1024 = 2^{10}$ bytes.
If you aren't sure about this, multiply out
$2\times 2 \times 2 \times \ldots \times 2$ with ten 2's, to see you get $1024= 2^{10}.$
A MB of memory is $1,048,576 = 2^{20}$ bytes, or just over a million bytes. A GB is $1073741824 = 2^{30}$ bytes, or just over a billion bytes.
It's a funny coincidence that $10^3 \approx 2^{10}$ so that kilo =1000 is about the same as Kilo = 1024.
We write this down in logarithm form, adding a subscript to keep track of the base. So
$$
\log_2(1024) = 10
$$
$$
\log_2(1048576) = 20
$$
$$
\log_2(1073741824) = 30.
$$
In general, the number $\log_2(x)$ is defined as the solution to
$$
x = 2^n.
$$
Logarithms can be defined with any number $B$ as a base, provided $B$ is positive and not equal to one. The function is then written as $\log_B(x).$
## Three important bases:
In practice, there are only three log functions that occur in most of math and science:
$$
\log_2(x), \log_{10}(x), \mbox{ and } \log_e(x),
$$
which have bases 2, 10 and $e$, respectively, where $e = 2.71...$ is the natural exponential that occurs in calculus.
The base ten logarithm $\log_{10}(x)$ occurs so often that it is sometimes abbreviated as $\log(x)$, as we did in the first section of this notebook.
The base $e$ logarithm is called the natural log, written $\ln(x)$. The natural logarithm arises in calculus,
where it is often denoted simply as $\log x$. So one must pay attention to the context when the base is unspecified!
## Examples:
- {1} Can we find $\log_2(4000)$ approximately, without using a calculator?
Sure. Here's one way. We know that $4 = 2^2$, and that $1000 \approx 2^{10}$. So $4000 \approx 2^2 \times 2^{10} = 2^{12}$.
So we conclude
$$
\log_2(4000) \approx 12.
$$
A quick check with a calculator shows $\log_2(4000) = 11.96578...$ so that was a pretty good approximation!
- {2} Can we find $\log(\pi)$ approximately?
Well, our friends the ancient Egyptians thought that $\pi$ was the square root of 10. It's not, but that's a pretty good approximation. So we have
$$
\log(\pi) \approx \log(10^{1/2}) = 1/2.
$$
In fact, a check with a calculator shows $\log(\pi) = 0.49715...$, so again we have a pretty good approximation.
## Basics of Logarithms:<p>
Even though logarithms can seem very complicated, we can look at the basic relationship between logarithms and exponentials in order to simplify these expressions to furture enhance our understandings. Before looking deeper in these relationships, we will first identify the main components of a logarithmic function. Logarithms are written in the following form:<p>
$\log_B(x)=m$ where B is the base of the logarithm. <p>
Given a number $x$, we define $\log_B(x)=m$ as the solution to the exponential relationship
$$
x=B^m.
$$
## Logarithmic Laws<p>
There are 4 main logarithmic laws which help show the relationship between exponential and logarithmic functions.
- Product Law: $\log_{B}(x \times y)=\log_{B}(x)+\log_{B}(y)$
- Quotient Law: $\log_{B}( x \div y) =\log_{B}(x)-\log_{B}(y)$
- Power Law: $\log_{B}(x^p)=p\times \log_B(x)$
- Changing Base Rule: $\log_{B}(x)=\frac{\log_C(x)}{\log_C(B)}$
## Background: Exponential Laws
Since logarithms are closely related with exponents, we will be using exponential laws when deriving logarithmic laws. Exponential Laws state:
- $B^m \times B^n=B^{m+n} \quad (1) $
- $\frac{B^m}{B^n}=B^{m-n} \quad \quad \; \;\;\,(2)$
- $(B^m)^n=B^{mn} \quad \quad \,(3)$
- $(BC)^m=B^m C^m \quad \,(4)$
We will be referring to these laws throughout the program using the number in the brackets above.
```
from IPython.display import display, Latex, clear_output
from math import log
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
import numpy as np
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual,IntSlider, Output, VBox, HBox, Label
from abc import ABCMeta, abstractmethod
class Logarithm:
    """Static helpers that evaluate logarithmic curves for the plots below.

    Every helper takes a numpy array of x-coordinates and returns the list
    (or array) of matching y-coordinates.
    """

    @staticmethod
    def log(m, b, x):
        """Evaluate y = log_b(m * x) element-wise over the array x."""
        return [log(value, b) for value in m * x]

    @staticmethod
    def log_exp(r, m, b, x):
        """Evaluate y = log_b((m * x) ** r) element-wise over the array x."""
        return [log(value, b) for value in (m * x) ** r]

    @staticmethod
    def constant_x_log(r, m, b, x):
        """Evaluate y = r * log_b(m * x) element-wise over the array x."""
        return [r * log(value, b) for value in m * x]

    @staticmethod
    def division_of_logs(m, n, b, x):
        """Evaluate the element-wise ratio log_n(m * x) / log_n(b * x).

        Both logarithms share base n; m and b scale x inside the numerator
        and denominator respectively.
        """
        numerator = Logarithm.log(m, n, x)
        denominator = Logarithm.log(b, n, x)
        return np.divide(numerator, denominator)
#########################################
# Variables:
#   base - Value of the base of the logarithms
#        - Over the course of the program, we will set the base to be 10.
#   x    - The range of numbers that are shown on the x-axis for the graphical proofs
#########################################
# Shared plotting defaults: base-10 logarithms over x in [1, 10] (50 points).
base=10
x=np.linspace(1,10)
```
## <span style="color:blue"> Product Law
The first law we are looking at is the Product Law. This is used when finding the sum of two logarithmic functions with the same base. The law states that
- $\log_{B}(xy)=\log_{B}x+\log_{B}y$.
### An example
- $\log(100\times 1000) = \log(100) + \log(1000)$ or equivalently
- $\log(100,000) = 5 = 2 + 3.$
### Mathematical proof
We will look at the mathematical proof. It may look complicated, however, it can simply be broken down. First we fix quantities $x,y$ and then define
- $p=\log_B x$ and $q=\log_B y$.
The equivalent exponential forms are
- $B^p=x$ and $B^q=y$.
We take the product of these two equations to obtain
- $B^p \times B^q=x \times y$,
and from the Exponential Law (1), we can get the equivalent expression
- $B^{p+q}=x \times y$.
We apply log to both sides
- $\log_B(B^{p+q})=\log_B(x \times y),$
and then by the definition of a logarithm, we have
- $p+q=\log_B(x \times y)$.
Since we know $ p=\log_B x$ and $ q=\log_B y$, we obtain
- $\log_{B}x+\log_{B}y = \log_{B}(x \times y).$
That completes the mathematical proof of the product law.
### Graphical Demonstration
As we know, the product law states: $\log_{B}x+\log_{B}y = \log_{B}(x \times y).$
To go about this, we introduce a parameter $t$ that allows us to trace the graph of the logarithm function. We also introduce two constant integers, $m$ and $n$.
We let $x=mt$ and $y=nt$ and set the base $B$ to 10, abbreviating $\log_{10}(x)$ as $\log(x)$.
For these values of $x$ and $y$, the product law becomes
$\log(mt)+\log(nt) = \log(mnt^2)$
For the graphical demonstration, we graph the three terms in the above equation separately with respect to $t$. When looking at a $t$ value, the sum of the corresponding values of the functions on the left side of the equation should be equivalent to the function on the right side of the equation, thus providing a demonstration of the Product Law.
```
class ProductLaw():
    """Interactive demonstration of the product law
    log_B(x) + log_B(y) = log_B(x * y), with x = m*t, y = n*t and base B = 10.

    The class is used as a plain namespace: methods deliberately take no
    `self`, and all state lives in class attributes that the slider
    callbacks mutate via `ProductLaw.<attr>`.
    """
    # 2x2 grid: the two summands stacked on the left, the product on the right
    gs = gridspec.GridSpec(2, 2)
    axis = 5  # current value of the traced parameter t
    x = 6     # constant m
    y = 3     # constant n

    # slider controlling the traced parameter t
    x_axis_bar = widgets.IntSlider(
        value=5,
        min=1,
        max=10,
        step=1,
        description='$t$',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )
    # slider controlling the constant m
    x_bar = widgets.IntSlider(
        value=x,
        min=1,
        max=10,
        step=1,
        description='$m$',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )
    # slider controlling the constant n
    y_bar = widgets.IntSlider(
        value=y,
        min=1,
        max=10,
        step=1,
        description='$n$',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )

    def create_graph():
        """Plot log(mt), log(nt) and log(mn*t^2), mark the current t on each
        curve, and display the numeric check that the first two values sum to
        the third.

        NOTE: the bare name `x` below resolves to the module-level
        np.linspace(1, 10) sample grid, not the class attribute ProductLaw.x.
        """
        # Plot the 3 log functions from the left and right side of the Product Law
        ax1 = plt.subplot(ProductLaw.gs[0, 0])  # row 0, col 0
        ax1.plot(x, Logarithm.log(ProductLaw.x, base, x), '-b', label='$y=\log_{B}(x)$')
        p1 = log(ProductLaw.x * ProductLaw.axis, base)  # log(m*t) at the slider's t
        ax1.plot(ProductLaw.axis, p1, 'ob')
        ax1.annotate('%1.3f' % p1, xy=(ProductLaw.axis, p1), xytext=(-10, -20), textcoords='offset points')
        ax1.set_title('Left side of Product law')
        plt.ylabel('$\log_{B}(mt)$')
        ax1.yaxis.set_label_position("left")
        plt.grid()
        ax2 = plt.subplot(ProductLaw.gs[1, 0])
        ax2.plot(x, Logarithm.log(ProductLaw.y, base, x), '-g', label='$y=\log_{B}(y)$')
        p2 = log(ProductLaw.y * ProductLaw.axis, base)  # log(n*t)
        ax2.plot(ProductLaw.axis, p2, 'og')
        ax2.annotate('%1.3f' % p2, xy=(ProductLaw.axis, p2), xytext=(-10, -20), textcoords='offset points')
        plt.ylabel('$\log_{B}(nt)$')
        ax2.yaxis.set_label_position("left")
        plt.xlabel('$t$')
        plt.grid()
        ax3 = plt.subplot(ProductLaw.gs[:, 1])
        ax3.plot(x, Logarithm.log(ProductLaw.x * ProductLaw.y, base, x**2), '-r', label='$y=\log_{B}(xy)$')
        p3 = log(ProductLaw.x * ProductLaw.y * (ProductLaw.axis**2), base)  # log(m*n*t^2)
        ax3.plot(ProductLaw.axis, p3, 'or')
        ax3.annotate('%1.3f' % p3, xy=(ProductLaw.axis, p3), xytext=(-10, -20), textcoords='offset points')
        ax3.set_title('Right side of Product Law')
        plt.ylabel('$\log_{B}(mnt^2)$')
        ax3.yaxis.set_label_position("right")
        plt.xlabel('$t$')
        plt.grid()
        plt.show()
        # FIX: the original used indices {1}/{2} and passed an unused
        # ProductLaw.axis as argument 0; output is unchanged.
        display(Latex('When $m$={0:1d} and $n$={1:1d}'.format(ProductLaw.x, ProductLaw.y)))
        # Display the value of the points to prove that the law is valid
        # (FIX: "repectively" -> "respectively")
        display(Latex('From the marked y-coordinates on the graph above, the points at log({0:1d}$t$), log({1:1d}$t$) and log({2:1d}$t^2$) are at {3:1.3f}, {4:1.3f} and {5:1.3f} respectively'.format(ProductLaw.x, ProductLaw.y, ProductLaw.x * ProductLaw.y, p1, p2, p3)))
        display(Latex('{0:1.3f}+{1:1.3f}={2:1.3f}'.format(p1, p2, p1 + p2)))
        # FIX: originally compared p3 with itself; show the sum against the
        # right-hand-side value (mathematically equal, so the display agrees).
        display(Latex('{0:1.3f}={1:1.3f}'.format(p1 + p2, p3)))
        display(Latex('This means that the left side of the equation equals the right side'))
        display(Latex('thus'))
        display(Latex(r'$\log_{B}x+\log_{B}y = \log_{B}(x \times y)$'))

    def clear_display():
        """Redraw the whole cell output: sliders first, then a fresh figure."""
        clear_output(wait=True)
        display(ProductLaw.x_bar)
        display(ProductLaw.y_bar)
        display(ProductLaw.x_axis_bar)
        ProductLaw.create_graph()
        ProductLaw.observe()

    def observe():
        """(Re-)attach the slider callbacks."""
        ProductLaw.x_axis_bar.observe(ProductLaw.xv, names='value')
        ProductLaw.x_bar.observe(ProductLaw.x_barv, names='value')
        ProductLaw.y_bar.observe(ProductLaw.y_barv, names='value')
        #ProductLaw.clear_display()

    def xv(value):
        """t-slider callback."""
        ProductLaw.axis = value['new']
        ProductLaw.clear_display()

    def x_barv(value):
        """m-slider callback."""
        ProductLaw.x = value['new']
        ProductLaw.clear_display()

    def y_barv(value):
        """n-slider callback."""
        ProductLaw.y = value['new']
        ProductLaw.clear_display()


# initial render of the widget + figure
ProductLaw.clear_display()
```
### Results
In the mathematical proof, we used the relationship between logarithms and exponents in order to derive the Product Law. Based on the values recorded during the graphical proof, we see that the left-hand side of the law is equivalent to sum of the two functions on the right-hand side.
## <span style="color:blue"> Quotient Law
The next law we will be looking at is the Quotient Law. This is used when finding the difference of two logarithmic functions. The law states that
- $\log_{B}(x \div y)=\log_{B}x -\log_{B}y$.
### An example
- $\log(1000 \div 100) = \log(1000) - \log(100)$ or equivalently
- $\log(10) = 1 = 3 -2.$
### Mathematical proof
Let's create a proof of the Quotient law.
First, fix quantities $x$ and $y$ and define the values
- $ p = \log_B x$ and $ q = \log_B y.$
The equivalent exponential forms are
- $B^p= x$ and $B^q = y$.
Divide these two equations to obtain: <br />
- $B^p \div B^q = x \div y.$
Using Exponential Law (2), the above equation is equivalent to: <br />
- $B^{p-q}=x \div y.$
Taking logs, we have
- $\log_{B}(B^{p-q}) = \log_B(x\div y) $ which becomes
- $p - q = \log_{B}(x \div y).$
Recalling our definitions of $p$ and $q$, this becomes
- $\log_B x - \log_B y = \log_B(x\div y),$
which completes the proof of the Quotient Law.
### Graphical Demonstration
As we know, the Quotient Law states: $\log_{B}x-\log_{B}y = \log_{B}(x \div y).$
To go about this, we introduce a parameter $t$ that allows us to trace the graph of the logarithm function. We will also introduce two constant integers, $m$ and $n$.
We let $x=mt$ and $y=nt$ and set the base $B$ to 10, abbreviating $\log_{10}(x)$ as $\log(x)$.
For these values of $x$ and $y$, the quotient law becomes
$\log(mt)-\log(nt) = \log\left(\frac{mt}{nt}\right)$
which reduces to:
$\log(mt)-\log(nt) = \log\left(\frac{m}{n}\right)$
For the graphical demonstration, we will graph the three terms in the above equation separately with respect to $t$. When looking at a $t$ value, the difference of the corresponding values of the functions on the left side of the equation should be equivalent to the function on the right side of the equation, thus providing a demonstration of the Quotient Law.
```
class QuotientLaw():
    """Interactive demonstration of the quotient law
    log_B(x) - log_B(y) = log_B(x / y), with x = m*t, y = n*t and base B = 10
    (so the right-hand side is the constant log(m/n)).

    Used as a namespace: methods take no `self`; the slider callbacks mutate
    the class attributes below via `QuotientLaw.<attr>`.
    """
    # 2x2 grid: the two terms stacked on the left, their difference on the right
    gs = gridspec.GridSpec(2, 2)
    axis = 5  # current value of the traced parameter t
    x = 6     # constant m
    y = 3     # constant n

    # slider controlling t (description kept as 'x' to match the original UI)
    x_axis_bar = widgets.IntSlider(
        value=5,
        min=1,
        max=10,
        step=1,
        description='x',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )
    # slider controlling the constant m
    x_bar = widgets.IntSlider(
        value=x,
        min=1,
        max=10,
        step=1,
        description='$m$',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )
    # slider controlling the constant n
    y_bar = widgets.IntSlider(
        value=y,
        min=1,
        max=10,
        step=1,
        description='$n$',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )

    def create_graph():
        """Plot log(mt), log(nt) and the constant log(m/n), mark the current t,
        and display the numeric check p1 - p2 = p3.

        NOTE: the bare name `x` below resolves to the module-level
        np.linspace(1, 10) sample grid, not the class attribute QuotientLaw.x.
        """
        # constant argument array for the right-hand side: m/n everywhere
        y_value = np.linspace(QuotientLaw.x / QuotientLaw.y, QuotientLaw.x / QuotientLaw.y)
        # Plot the 3 log functions from the left and right side of the law
        ax1 = plt.subplot(QuotientLaw.gs[0, 0])  # row 0, col 0
        ax1.plot(x, Logarithm.log(QuotientLaw.x, base, x), '-b')
        p1 = log(QuotientLaw.x * QuotientLaw.axis, base)  # log(m*t)
        ax1.plot(QuotientLaw.axis, p1, 'ob')
        ax1.annotate('%1.3f' % p1, xy=(QuotientLaw.axis, p1), xytext=(-10, -20), textcoords='offset points')
        ax1.set_title('Left side of Quotient Law')
        plt.ylabel('$\log(m)$')
        plt.grid()
        ax2 = plt.subplot(QuotientLaw.gs[1, 0])
        ax2.plot(x, Logarithm.log(QuotientLaw.y, base, x), '-g')
        p2 = log(QuotientLaw.y * QuotientLaw.axis, base)  # log(n*t)
        ax2.plot(QuotientLaw.axis, p2, 'og')
        ax2.annotate('%1.3f' % p2, xy=(QuotientLaw.axis, p2), xytext=(-10, -20), textcoords='offset points')
        plt.ylabel('$\log(n)$')
        plt.xlabel('x')
        plt.grid()
        ax3 = plt.subplot(QuotientLaw.gs[:, 1])
        ax3.plot(x, Logarithm.log(1, base, y_value), '-r')
        p3 = log(QuotientLaw.x / QuotientLaw.y, base)  # constant log(m/n)
        ax3.plot(QuotientLaw.axis, p3, 'or')
        ax3.annotate('%1.3f' % p3, xy=(QuotientLaw.axis, p3), xytext=(-10, -20), textcoords='offset points')
        ax3.set_title('Right side of Quotient Law')
        plt.ylabel(r'$\log(\frac{m}{n})$')
        ax3.yaxis.set_label_position("right")
        plt.xlabel('x')
        plt.grid()
        plt.show()
        # FIX: the original used indices {1}/{2} and passed an unused
        # QuotientLaw.axis as argument 0; output is unchanged.
        display(Latex('When $m$={0:2.0f} and $n$={1:2.0f}'.format(QuotientLaw.x, QuotientLaw.y)))
        # FIX: "repectively" -> "respectively"
        display(Latex('The y-coordinates at log({0:1.0f}$t$), log({1:1.0f}$t$) and log({2:1.0f}) are at {3:1.3f}, {4:1.3f} and {5:1.3f} respectively'.format(QuotientLaw.x, QuotientLaw.y, QuotientLaw.x / QuotientLaw.y, p1, p2, p3)))
        display(Latex('{0:1.3f}-{1:1.3f}={2:1.3f}'.format(p1, p2, p3)))
        display(Latex('thus'))
        display(Latex(r'$\log(m) - \log(n) = \log(\frac{m}{n})$'))

    def clear_display():
        """Redraw the whole cell output: sliders first, then a fresh figure."""
        clear_output(wait=True)
        display(QuotientLaw.x_bar)
        display(QuotientLaw.y_bar)
        display(QuotientLaw.x_axis_bar)
        QuotientLaw.create_graph()
        QuotientLaw.observe()

    def observe():
        """(Re-)attach the slider callbacks."""
        QuotientLaw.x_axis_bar.observe(QuotientLaw.x_value, names='value')
        QuotientLaw.x_bar.observe(QuotientLaw.xv, names='value')
        QuotientLaw.y_bar.observe(QuotientLaw.yv, names='value')

    def x_value(value):
        """t-slider callback."""
        QuotientLaw.axis = value['new']
        QuotientLaw.clear_display()

    def xv(value):
        """m-slider callback."""
        QuotientLaw.x = value['new']
        QuotientLaw.clear_display()

    def yv(value):
        """n-slider callback."""
        QuotientLaw.y = value['new']
        QuotientLaw.clear_display()


# initial render of the widget + figure
QuotientLaw.clear_display()
```
### Result
In the mathematical proof, we used the relationship between logarithms and exponents as well as exponential laws in order to derive the Quotient Law. When we look at the graphical demonstration, we see that the functions on the right hand side both resemble very similar curves. On the left hand side of the law, we can see that the function remains as a constant number. We also see that the left-hand side of the law is equivalent to the difference to the two functions on the right-hand side.
## <span style="color:blue"> Power Law
The next law we will look at is power law. This is used in the case when there is an exponential power inside the logarithmic function. The law states that
- $\log_{B}(x^p)=p \times \log_B(x)$.
### An example
- $\log(1000^2) = 2\log(1000) $ or equivalently
- $\log(1,000,000) = 6 = 2 \times 3.$
### Mathematical Proof
First we fix quantities $x$ and $p$ then define
- $ m = \log_B (x^p).$
The equivalent exponential form is
- $B^m=x^p$.
Bring each side of the equation to the power of $1/p$ to obtain
- $(B^m)^{\frac{1}{p}}=(x^p)^{\frac{1}{p}}.$
By using Exponential Law (3), we can multiply the exponents to the one inside the brackets to get
- $B^{\frac{m}{p}}= x.$
Apply the log function to both sides to get
- $\log_B(B^{\frac{m}{p}})=\log_B(x) $, resulting in
- $\frac{m}{p} = \log_B(x).$
Multiply by $p$ to obtain
- $m = p \times \log_B(x),$ and recalling the definition of $m$, we have
- $\log_B(x^p) = p \times \log_B(x).$
This completes the proof.
### Graphical Demonstration
In this case, there is one function on each of the left and right hand sides of the law. For this reason, two functions will be graphed. Since they are theoretically equivalent to each other, we can expect the two graphs to be identical. If this is seen on the graph, we can validate the Power Law.
As we know, the power Law states: $\log_B(x^p) = p \times \log_B(x).$
To go about this, we introduce a parameter $t$ that allows us to trace the graph of the logarithm function. We will also introduce a constant integer, $m$.
We let $x=mt$ and set the base $B$ to 10, abbreviating $\log_{10}(x)$ as $\log(x)$.
For this value of $x$, the power law becomes
$\log_B(mt^p) = p \times \log_B(mt)$
For the graphical demonstration, we will graph the three terms in the above equation separately with respect to $t$. When looking at a $t$ value, the function on the left side of the equation should be equivalent to the function on the right side of the equation, thus providing a demonstration of the power law.
```
class PowerLaw():
    """Interactive demonstration of the power law
    log_B(x^p) = p * log_B(x), with x = m*t and base B = 10.

    Used as a namespace: methods take no `self`; the slider callbacks mutate
    the class attributes below via `PowerLaw.<attr>`.
    """
    # 1x2 grid: one panel per side of the law
    gs = gridspec.GridSpec(1, 2)
    axis = 5  # current value of the traced parameter t
    # FIX: an earlier class attribute `x = np.linspace(1, 10)` was dead code —
    # it was immediately overwritten by `x = 6` below, so it has been removed.
    x = 6     # constant m
    p = 2     # exponent p

    # slider controlling t (description kept as 'x' to match the original UI)
    x_axis_bar = widgets.IntSlider(
        value=5,
        min=1,
        max=10,
        step=1,
        description='x',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )
    # slider controlling the constant m
    x_bar = widgets.IntSlider(
        value=x,
        min=1,
        max=10,
        step=1,
        description='$m$',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )
    # slider controlling the exponent p
    p_bar = widgets.IntSlider(
        value=p,
        min=1,
        max=10,
        step=1,
        description='$p$',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d'
    )

    def create_graph():
        """Plot log((mx)^p) and p*log(mx), mark the current t on each, and
        display the numeric check p1 = p2.

        NOTE: the bare name `x` below resolves to the module-level
        np.linspace(1, 10) sample grid, not the class attribute PowerLaw.x.
        NOTE(review): the y-axis labels ('log_B(Mx)' / 'log_B(Nx)') look like
        leftovers from the product-law figure — confirm intent before relabelling.
        """
        ax1 = plt.subplot(PowerLaw.gs[0, 1])  # right panel
        ax1.plot(x, Logarithm.log_exp(PowerLaw.p, PowerLaw.x, base, x), '-g')
        p1 = log((PowerLaw.x * PowerLaw.axis)**PowerLaw.p, base)  # log((m*t)^p)
        ax1.plot(PowerLaw.axis, p1, 'ob')
        ax1.annotate('%1.3f' % p1, xy=(PowerLaw.axis, p1), xytext=(-10, -20), textcoords='offset points')
        ax1.set_title('Right side of Power law')
        plt.ylabel('$y=\log_{B}(Mx)$')
        ax1.yaxis.set_label_position("right")
        plt.xlabel('x')
        plt.grid()
        ax2 = plt.subplot(PowerLaw.gs[0, 0])  # left panel
        ax2.plot(x, Logarithm.constant_x_log(PowerLaw.p, PowerLaw.x, base, x), '-b')
        p2 = PowerLaw.p * log(PowerLaw.x * PowerLaw.axis, base)  # p * log(m*t)
        ax2.plot(PowerLaw.axis, p2, 'og')
        ax2.annotate('%1.3f' % p2, xy=(PowerLaw.axis, p2), xytext=(-10, -20), textcoords='offset points')
        plt.ylabel('$y=\log_{B}(Nx)$')
        ax2.yaxis.set_label_position("left")
        ax2.set_title('Left side of Power Law')
        plt.xlabel('x')
        plt.grid()
        plt.show()
        display(Latex('at $m$={0:1d} and $p$={1:1d}'.format(PowerLaw.x, PowerLaw.p)))
        # FIX: "repectively" -> "respectively"
        display(Latex(r'We can see that the y-coordinates are labeled on the graph. At the points log(${0:1d}^{1:1d}x$) and {2:1d} $\times$ log({3:1d}$x$) the y-coordinates are {4:1.3f} and {5:1.3f} respectively'.format(PowerLaw.x, PowerLaw.p, PowerLaw.p, PowerLaw.x, p1, p2)))
        display(Latex('{0:1.3f}={1:1.3f}'.format(p1, p2)))
        display(Latex('thus'))
        display(Latex(r'$\log_{B}(x^p)=p \times \log_B(x)$'))

    def clear_display():
        """Redraw the whole cell output: sliders first, then a fresh figure."""
        clear_output(wait=True)
        display(PowerLaw.x_bar)
        display(PowerLaw.p_bar)
        display(PowerLaw.x_axis_bar)
        PowerLaw.create_graph()
        PowerLaw.observe()

    def observe():
        """(Re-)attach the slider callbacks."""
        PowerLaw.x_axis_bar.observe(PowerLaw.x_value, names='value')
        PowerLaw.x_bar.observe(PowerLaw.xv, names='value')
        PowerLaw.p_bar.observe(PowerLaw.pv, names='value')

    def x_value(value):
        """t-slider callback."""
        PowerLaw.axis = value['new']
        PowerLaw.clear_display()

    def xv(value):
        """m-slider callback."""
        PowerLaw.x = value['new']
        PowerLaw.clear_display()

    def pv(value):
        """p-slider callback."""
        PowerLaw.p = value['new']
        PowerLaw.clear_display()


# initial render of the widget + figure
PowerLaw.clear_display()
```
### Results
The mathematical proof shows that by first converting the logarithmic functions into exponents and then using the exponential laws, we can derive the Power Law. Looking at the graph, we see that the function on the left-hand side is equivalent to the one on the right-hand side.
## <span style="color:blue"> Change of Base Rule
This rule is useful for changing the base of a logarithmic function which can be useful for proofs or comparing certain functions. The law states that: <br />
$\log_{B}(x)=\frac{\log_C(x)}{\log_C(B)}$
### An example
- $\log_8(64) = \frac{\log_2(64)}{\log_2(8)}$ or equivalently
- $2 = \frac{6}{3}.$
### Mathematical Proof
First we need to define a variable. In this case, we will use x.
- $\text{Let }x=\log_{B}(M)$
When converting this to exponents by using basic logarithmic properties, we get:
- $B^x=M$
$\text{Next, is to apply } \log_N \text{ to both sides of the equation}$
- $\log_N(B^x)=\log_N(M)$
By Power Law (see above) this can be simplified to:
- $x\log_N(B)=\log_N(M)$
Isolating for x:
- $x=\frac{\log_N(M)}{\log_N(B)}$
After inputing the x value we defined earlier, we get:
- $\log_{B}(M)=\frac{\log_N(M)}{\log_N(B)}$
### Discussion
The change of base law says that
- $\log_B(x) = \frac{\log_C(x)}{\log_C(B)}.$
Another way to write this is
- $\log_B(x) = \log_C(x)\times \log_B(C).$ (Can you see why?)
The point is, the two functions $\log_B(x), \log_C(x)$ are related by a proportionality constant, so we can write
$$ \log_B(x) = k\cdot \log_C(x).$$
For instance, the two functions $\log_2(x)$ and $\log_{10}(x)$ are the same, up to some constant $k$. Perhaps you can explain why this constant is approximately $10/3$. That is
$$\log_2(x) \approx \frac{10}{3} \log_{10}(x).$$
Equivalently,
$$\log_{10}(x) \approx 0.3 \log_{2}(x).$$
(Hint: this has something to do with our discussion of kilos in the first section of this notebook.)
### Evidence
As it is hard to graph this rule directly (there is no good place to put $x$), we will demonstrate it numerically instead: we plug numbers into each side of the equation and compare the values obtained. Notice that changing the new base value has no effect on the final result.
```
class ChangeOfBase():
    """Numeric demonstration of the change-of-base rule
    log_B(M) = log_C(M) / log_C(B).

    Used as a namespace: methods take no `self`. The sliders defined at
    module level below mutate these class attributes through the callbacks.
    """
    # First set default values for the demonstration
    M = 5         # argument of the logarithm
    base = 10     # original base B
    new_base = 5  # new base C

    def create_graph():
        """Evaluate both sides of the rule with the current slider values and
        display them; the two values always agree, whatever new base is chosen.
        """
        # left-hand side: log_B(M)
        p1 = log(ChangeOfBase.M, ChangeOfBase.base)
        # right-hand side: log_C(M) / log_C(B)
        p2 = log(ChangeOfBase.M, ChangeOfBase.new_base) / log(ChangeOfBase.base, ChangeOfBase.new_base)
        display(Latex('On the left hand side $\log_B(M)$ = {0:1.3f}.'.format(p1)))
        display(Latex(r'On the right hand side is $\log_C(M) \div \log_C(B)$ = {0:1.3f}.'.format(p2)))
        display(Latex('{0:1.3f} = {1:1.3f}'.format(p1, p2)))
        display(Latex('thus'))
        display(Latex(r'$\log_{B}(M) = \frac{\log_C(M)}{\log_C(B)}$'))

    def clear_display():
        """Redraw the sliders and the numeric comparison."""
        clear_output(wait=True)
        display(m_box)
        display(base_box)
        display(new_base_box)
        ChangeOfBase.create_graph()

    def xv(value):
        # NOTE(review): dead code — never registered with .observe() and
        # ChangeOfBase defines no `axis` attribute; kept for fidelity.
        ChangeOfBase.axis = value['new']
        ChangeOfBase.clear_display()

    def Mv(value):
        """M-slider callback."""
        ChangeOfBase.M = value['new']
        ChangeOfBase.clear_display()

    def Basev(value):
        """original-base slider callback."""
        ChangeOfBase.base = value['new']
        ChangeOfBase.clear_display()

    def New_basev(value):
        """new-base slider callback."""
        ChangeOfBase.new_base = value['new']
        ChangeOfBase.clear_display()


# slider for M
M_bar = widgets.IntSlider(
    value=ChangeOfBase.M,
    min=1,
    max=10,
    step=1,
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)
m_box = HBox([Label('M value'), M_bar])
# slider for the original base B (min=2: a logarithm base must exceed 1)
base_bar = widgets.IntSlider(
    value=ChangeOfBase.base,
    min=2,
    max=10,
    step=1,
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)
base_box = HBox([Label('Original base value'), base_bar])
# slider for the new base C
new_base_bar = widgets.IntSlider(
    value=ChangeOfBase.new_base,
    min=2,
    max=10,
    step=1,
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)
new_base_box = HBox([Label('New base value'), new_base_bar])

# initial render, then wire up the callbacks
ChangeOfBase.clear_display()
M_bar.observe(ChangeOfBase.Mv, names='value')
base_bar.observe(ChangeOfBase.Basev, names='value')
new_base_bar.observe(ChangeOfBase.New_basev, names='value')
```
### Results
The mathematical proof uses the relationship between logarithms and exponents in order to change the value of the base and thus derive the rule. When plugging numbers into the rule, we can see that the left hand side of the equation is always equal to the right hand side, regardless of the numbers that are plugged in. By these 2 proofs, we can confirm the changing base rule.
## <span style="color:blue"> Examples
### 1. Simplify the following equation, then solve for $x$
### $ 3^{\log(x)}3^{\log(x)}$
Using Exponential Law (1), we can get:
- $3^{\log(x)+\log(x)}$
This is simplified to:
- $3^{2\log(x)}$
Using Power law, we know $2\log(x)=\log(x^2)$. From this identity, we simplify the expression to:
- $3^{\log(x^2)}$
To solve for $x$, we need to first note that $3 = 10^{\log 3}.$ So we can continue with
- $ 3^{\log(x^2)} = 10^{\log 3 \log x^2} = 10^{\log x^{2\log 3}} = x^{2\log 3} = x^{\log(9)}.$
Thus, we can say:
- $x^{\log(9)} \approx x^{.954}$ </p>
### 2. $\text{Simplify the expression: } 2\log(x) - \frac{\log(z)}{2} + 3\log(y)$
Next, we will apply Power Law on each term. While keeping in mind that $z^{\frac{1}{2}}=\sqrt{z}$, we can simplify this equation to:
- $\log\left(x^2\right) - \log(\sqrt{z})+\log(y^3)$
We can apply both Quotient and Product Law to this equation. This will result in the final simplified form of:
<div id='page' style='width: 100px'>
<p style="border:2px; border-style:solid; border-color:#000000;"> $\quad \log(\frac{x^2y^3}{\sqrt{z}})$ <p />
</div>
### 3. Solve for x $ 2^{(x-2)}=2^x -2 $
Using Exponent Law (2) reguarding the division of exponents, we can come up with the equivalent equation:
- $\frac{2^x}{2^2}=2^x-2$
The next step is to complete simple algebra. We can put all of the $2^x$ terms on the same side to isolate for x.
The intermediate step is:
- $2^x=4(2^x-2)$<br>
When we put all of the $2^x$ terms on the same side, we get:
- $-3(2^x)=-8$
which becomes:
- $2^x = \frac{8}{3}$
Since we know, $\log_2(2)=1$, we can apply $\log_2$ onto both sides. We get:
- $\log_2(2^x)=\log_2\left(\frac{8}{3}\right)$
Using Power Law, $\log_2(2^x)$ is equivalent to $ x\log_22$ where $\log_22 = 1$. Thus:
<div id='page' style='width: 100px'>
<p style="border:2px; border-style:solid; border-color:#000000;"> $x= \log_2\left(\frac{8}{3}\right)$ <p />
## Conclusion
By analysing each of these laws in both mathematical and graphical ways, we have seen that each of them can be proven and validated. These laws provide shortcuts that make it easier to simplify and analyze more complex functions.
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| github_jupyter |
# Confounding Example: Finding causal effects from observed data
Suppose you are given some data with treatment and outcome. Can you determine whether the treatment causes the outcome, or the correlation is purely due to another common cause?
```
import os, sys
sys.path.append(os.path.abspath("../../"))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
import dowhy
from dowhy import CausalModel
import dowhy.datasets, dowhy.plotter
```
## Let's create a mystery dataset for which we need to determine whether there is a causal effect.
Creating the dataset. It is generated from either one of two models:
* **Model 1**: Treatment does cause outcome.
* **Model 2**: Treatment does not cause outcome. All observed correlation is due to a common cause.
```
# Flip a fair coin for the ground truth: 1 -> treatment causes outcome,
# 0 -> the observed correlation is pure confounding.
rvar = 1 if np.random.uniform() >0.5 else 0
# Synthetic dataset with a common cause (w0) and the chosen true effect
data_dict = dowhy.datasets.xy_dataset(10000, effect=rvar, sd_error=0.2)
df = data_dict['df']
print(df[["Treatment", "Outcome", "w0"]].head())
# Scatter treatment vs. outcome over time to eyeball the correlation
dowhy.plotter.plot_treatment_outcome(df[data_dict["treatment_name"]], df[data_dict["outcome_name"]],
df[data_dict["time_val"]])
```
## Using DoWhy to resolve the mystery: *Does Treatment cause Outcome?*
### STEP 1: Model the problem as a causal graph
Initializing the causal model.
```
# Build the causal graph from the roles declared by the dataset generator
model= CausalModel(
    data=df,
    treatment=data_dict["treatment_name"],
    outcome=data_dict["outcome_name"],
    common_causes=data_dict["common_causes_names"],
    instruments=data_dict["instrument_names"])
model.view_model(layout="dot")  # renders the graph to causal_model.png
```
Showing the causal model stored in the local file "causal_model.png"
```
from IPython.display import Image, display
display(Image(filename="causal_model.png"))  # image written by view_model() above
```
### STEP 2: Identify causal effect using properties of the formal causal graph
Identify the causal effect using properties of the causal graph.
```
# Derive the estimand (what must be computed) from the graph structure alone
identified_estimand = model.identify_effect()
print(identified_estimand)
```
### STEP 3: Estimate the causal effect
Once we have identified the estimand, we can use any statistical method to estimate the causal effect.
Let's use Linear Regression for simplicity.
```
# Estimate the identified effect with a simple backdoor linear regression
estimate = model.estimate_effect(identified_estimand,
    method_name="backdoor.linear_regression")
print("Causal Estimate is " + str(estimate.value))
# Plot slope of line between treatment and outcome = causal effect
dowhy.plotter.plot_causal_effect(estimate, df[data_dict["treatment_name"]], df[data_dict["outcome_name"]])
```
### Checking if the estimate is correct
```
print("DoWhy estimate is " + str(estimate.value))
# Compare against the coin flip that generated the data (0 or 1)
print ("Actual true causal effect was {0}".format(rvar))
```
### Step 4: Refuting the estimate
We can also refute the estimate to check its robustness to assumptions (*aka* sensitivity analysis, but on steroids).
### Adding a random common cause variable
```
# Robustness check: adding an independent random common cause should not move the estimate
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
```
### Replacing treatment with a random (placebo) variable
```
# Robustness check: a permuted (placebo) treatment should yield an effect near zero
res_placebo=model.refute_estimate(identified_estimand, estimate,
    method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
```
### Removing a random subset of the data
```
# Robustness check: the estimate should be stable on a random 90% subset of the data
res_subset=model.refute_estimate(identified_estimand, estimate,
    method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
```
As you can see, our causal estimator is robust to simple refutations.
| github_jupyter |
# 수학
## 1) 나머지 연산
C++
- int: 2^31-1
- long long: 2^63-1
- 10^18 정도, 따라서 정답을 나눈 값을 return하도록 함
- **답을 M으로 나눈 나머지 출력**
1) 덧셈 곱셈(https://www.acmicpc.net/problem/10430)
~~~
# mod = %
(A + B) % M = {(A % C) + (B % C)} % C
(A X B) % M = {(A % C) X (B % C)} % C
~~~
2) 뺄셈: 더해주고 나눔
~~~
0 <= A % C <= C
0 <= B % C <= C
-C < A % C - B % C < 2C
0 < A % C - B % C + C < 3C
~~~
3) 나눗셈: 페르마의 소정리
- A, B 서로소
- C: 소수
~~~
(A / B) % C = {A X B^(C-2)} % C
~~~
```
# My solution: parse the three integers manually
a = input()
a = [int(x) for x in a.split()]
A = a[0]; B = a[1]; C = a[2]
print((A+B)%C)
print((A%C + B%C)%C)
print((A*B)%C)
print((A%C * B%C)%C)
# Model answer: unpack directly with map()
A,B,C = map(int, input().split())
print((A+B)%C)
print((A%C + B%C)%C)
print((A*B)%C)
print((A%C * B%C)%C)
```
---
## 2) 최대공약수/공배수
- 정답이 분수일 때, 기약분수로 만들 때 사용
1) 최대공약수GCD: 유클리스 호제법사용
- 재귀: log_n
- 반복: log_n
- 3개 수: gcd(gcd(a, b), c)
```
# 2.1 최대공약수
def gcd(a, b):
    """Greatest common divisor via the recursive Euclidean algorithm."""
    return a if b == 0 else gcd(b, a % b)


gcd(9, 25)  # demo: coprime numbers -> 1
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


gcd(29, 20)          # demo: coprime numbers -> 1
gcd(gcd(5, 25), 30)  # demo: gcd of three numbers -> 5
```
## 3) 소수
- 알고리즘 두 가지
- 어떤 수 N이 소수인지 아닌지
- a. 2 ~ n-2 다 돌려보기
- b. (2보다 크거나 같고, **N/2 보다 작거나 같은 자연수**)로 나누어 떨어지면 안됨
- N = a*b -> 최소2(1 제외) -> 최대 2/N -> 소수 = 1*a
- c. **루트N 보다 작거나 같은 자연수**로 나누어 떨어지면 안됨
- N보다 작거나 같은 모든 소수 찾기
- **에라토스테네스의 체**: 소수 -> 배수 지우기 (N log(log(N)))
- 구현에서는 지우는 게 아니라, 검산방식
- generate가 아니라 delete: **남은 놈이 prime number**
- **골드바흐의 추측**: 2보다 큰 모든 **짝수**는 두 소수의 합으로 표현 가능
- 10^18 이하에서 증명됨
## 3.1) num은 prime인가?
- best: O($\sqrt{N}$)
### num보다 작은 모든 수로 num을 나눔
```
def is_prime(num):
    """Naive primality test: trial-divide num by every integer in [2, num)."""
    if num < 2:
        return False
    return all(num % candidate != 0 for candidate in range(2, num))


is_prime(1)  # demo: 1 is not prime -> False
int(3/2)     # demo: int() truncates toward zero -> 1
```
### num/2까지의 수로 num을 나눔
```
def is_prime(num):
    """Trial division up to num/2 (any proper divisor of num is <= num/2)."""
    if num < 2:
        return False
    return all(num % divisor != 0 for divisor in range(2, int(num/2)+1))


is_prime(923123)  # demo on a moderately large input
from math import sqrt
def is_prime(num):
    """Trial division up to sqrt(num).

    Any composite num has a divisor d with 2 <= d <= sqrt(num), so testing
    divisors up to int(sqrt(num)) is sufficient.

    BUG FIX: the original iterated only to int(sqrt(num/2)), which misses
    divisors in (sqrt(num/2), sqrt(num)] — e.g. it wrongly reported
    9 (= 3*3, sqrt(9/2) ~ 2.12) as prime.
    """
    if num < 2:
        return False
    for i in range(2, int(sqrt(num))+1):
        if num % i == 0:
            return False
    return True
is_prime(924311233377731)
def is_prime(num):
    """Trial division up to sqrt(num), using divisor*divisor <= num so that
    no floating-point sqrt is needed."""
    if num < 2:
        return False
    divisor = 2
    while divisor * divisor <= num:
        if num % divisor == 0:
            return False
        divisor += 1
    return True


is_prime(923123)  # demo call
```
## 3.2) 소수 갯수 찾기
- limit 보다 작은 수 중에 소수: 최초 소수의 배수를 삭제
```
# bad: times a sieve that re-marks multiples for every surviving candidate
import time
start = time.time()
limit = 100000000
a = [True]*(limit+1)  # a[i] True <=> i is still presumed prime (~800 MB!)
a[0] = a[1] = False
ans = 0
for i in range(2, limit+1):
    if a[i]:
        ans += 1
        # BUG FIX: the range end was `limit`, so index `limit` itself was
        # never marked and a composite limit (here 10**8) was counted as
        # prime; the end must be limit+1 to cover the whole array.
        for j in range(i+i, limit+1, i):
            a[j] = False
end = time.time()
print(end-start)
# good: same sieve, timed — the inner while loop is expressed as a stepped range
import time
start = time.time()
Max = 10000000
check = [True] * (Max+1)  # check[i] True <=> i is still presumed prime
check[0] = check[1] = False
ans = 0
for i in range(2, Max+1):
    if check[i]:
        ans += 1
        # mark every multiple of i from 2i up to and including Max
        for j in range(i+i, Max+1, i):
            check[j] = False
end = time.time()
print(end-start)
```
---
### 문제: https://www.acmicpc.net/problem/1978
```
def is_prime(n):
    """Return True iff n is prime, by trial division up to sqrt(n)."""
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True
_ = input()                             # first line holds the count; unused, we just filter
num = list(map(int, input().split()))   # the numbers to test
ans = list(filter(is_prime, num))       # keep only the primes
print(len(ans))                         # output: how many inputs were prime
```
### 문제 https://www.acmicpc.net/problem/1929
```
# Answer 3: sieve to a fixed MAX, marking multiples from i*i with a while loop
MAX = 1000000
check = [True]*(MAX+1)
check[0] = check[1] = False
for i in range(2, MAX+1):
    if check[i]:
        j = i*i          # multiples below i*i were already marked by smaller primes
        while j <= MAX:
            check[j] = False
            j += i
m, n = map(int, input().split())
for i in range(m, n+1):
    if check[i]:
        print(i)
# Answer 2: same sieve, the inner while replaced by a stepped range
MAX = 1000000
check = [True]*(MAX+1)
check[0] = check[1] = False
for i in range(2, MAX+1):
    if check[i]:
        for j in range(i*i, MAX+1, i):
            check[j] = False
m, n = map(int, input().split())
for i in range(m, n+1):
    if check[i]:
        print(i)
# Answer 1: sieve only up to the requested n, printing primes >= m as we go
m, n = map(int, input().split())
check = [True] * (n+1)
check[0] = check[1] = False
for i in range(2, n+1):
    if check[i]:
        for j in range(i*i, n+1, i):
            check[j] = False
        if i >= m:
            print(i)
```
---
## 4. 골드바흐 추측
- 모든 짝수는 두 홀수 소수로 나타낼 수 있음
- https://www.acmicpc.net/problem/6588
```
test = 5  # NOTE(review): this name is shadowed by the `def test(num)` further down
Max = 10  # upper bound for the demo sieve below
def prime_list(Max):
    """Return every odd prime <= Max via the sieve of Eratosthenes.

    2 is deliberately excluded (the `% 2` filter), since Goldbach partitions
    of even numbers > 4 use odd primes only.
    """
    sieve = [True] * (Max + 1)
    sieve[0] = sieve[1] = False
    odd_primes = []
    for candidate in range(2, Max + 1):
        if not sieve[candidate]:
            continue
        for multiple in range(candidate * candidate, Max + 1, candidate):
            sieve[multiple] = False
        if candidate % 2:
            odd_primes.append(candidate)
    return odd_primes
ans = prime_list(Max)
def test(num):
for a in ans:
b = num - a
if b in ans:
```
| github_jupyter |
# Serve a Pytorch model trained on SageMaker
The model for this example was trained using this sample notebook on sagemaker - https://github.com/awslabs/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/pytorch_mnist/pytorch_mnist.ipynb
It is certainly easier to do estimator.deploy() using the standard SageMaker SDK if you are following that example, but consider this one if you have a PyTorch model (or two) on S3 and you are looking for an easy way to test and deploy this model.
```
!pip install torch
!pip show sagemaker
```
## Step 1 : Write a model transform script
#### Make sure you have a ...
- "load_model" function
- input args are model path
- returns loaded model object
- model name is the same as what you saved the model file as (see above step)
<br><br>
- "predict" function
- input args are the loaded model object and a payload
- returns the result of model.predict
- make sure you format it as a single (or multiple) string return inside a list for real time (for mini batch)
- from a client, a list or string or np.array that is sent for prediction is interpreted as bytes. Do what you have to for converting back to list or string or np.array
- return the error for debugging
```
%%writefile modelscript_pytorch.py
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
from joblib import load
import numpy as np
import os
import json
from six import BytesIO
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Return loaded model
def load_model(modelpath):
    """Load the MNIST checkpoint from `modelpath/model.pth` and move it to `device`.

    The checkpoint was saved from a DataParallel wrapper (state_dict keys carry
    a `module.` prefix), so the fresh Net is wrapped the same way before loading.
    """
    model = torch.nn.DataParallel(Net())
    with open(os.path.join(modelpath, 'model.pth'), 'rb') as f:
        # map_location keeps the load working on CPU-only hosts even when the
        # checkpoint was saved from a GPU.
        model.load_state_dict(torch.load(f, map_location=device))
    print("loaded")
    return model.to(device)
# return prediction based on loaded model (from the step above) and an input payload
def predict(model, payload):
    """Return [predicted_digit] for a payload, or [error_message] on failure.

    `payload` is either the raw request list the serving container hands over
    (image bytes under payload[0]['body']) or an already-shaped float32
    np.ndarray of (1, 1, 28, 28) pixels.
    """
    if isinstance(payload, list):
        # Real-time invocation: decode the raw bytes back into an image batch.
        data = np.frombuffer(payload[0]['body'], dtype=np.float32).reshape(1, 1, 28, 28)
    elif isinstance(payload, np.ndarray):
        data = payload
    try:
        print(type(data))
        input_data = torch.Tensor(data)
        model.eval()  # disable dropout for inference
        with torch.no_grad():
            out = model(input_data.to(device)).argmax(axis=1)[0].tolist()
    except Exception as e:
        # Contract: surface the error text to the caller for debugging.
        out = str(e)
    return [out]
```
### Download model locally
```
!aws s3 cp s3://ezsmdeploy/pytorchmnist/input.html ./
!aws s3 cp s3://ezsmdeploy/pytorchmnist/model.tar.gz ./
!tar xvf model.tar.gz
```
### Input data for prediction
Draw a number from 0 - 9 in the box that appears when you run the next cell
```
from IPython.display import HTML
import numpy as np
HTML(open("input.html").read())
```
## Does this work locally? (not "_in a container locally_", but _actually_ on this machine)
```
image = np.array([data], dtype=np.float32)
from modelscript_pytorch import *
model = load_model('./') #
predict(model,image)
```
### ok great! Now let's install ezsmdeploy
_[To Do]_: currently local; replace with pip version!
```
!pip install ezsmdeploy
import ezsmdeploy
```
#### If you have been running other inference containers in local mode, stop existing containers to avoid conflict
```
!docker container stop $(docker container ls -aq) >/dev/null
```
## Upload to your S3 bucket
```
import sagemaker
modelpath = sagemaker.session.Session().upload_data('./model.tar.gz')
```
## Deploy locally
```
# Local-mode deployment: builds the serving container and runs it on this
# machine so the model script can be debugged before a real deployment.
ez = ezsmdeploy.Deploy(model = [modelpath], #loading pretrained MNIST model
                       script = 'modelscript_pytorch.py',
                       requirements = ['numpy','torch','joblib'], #or pass in the path to requirements.txt
                       instance_type = 'local',
                       wait = True)
```
## Test containerized version locally
Since you are downloading this model from a hub, the first time you invoke it will be slow, so invoke again to get an inference without all of the container logs
```
out = ez.predictor.predict(image.tobytes()).decode()
out
```
## Deploy on SageMaker
```
# Real SageMaker deployment of the same model script.
ezonsm = ezsmdeploy.Deploy(model = [modelpath],
                           script = 'modelscript_pytorch.py',
                           requirements = ['numpy','torch','joblib'], #or pass in the path to requirements.txt
                           wait = True,
                           ei = 'ml.eia2.medium') # attach an Elastic Inference accelerator
# target_model selects the artifact when several models share one endpoint.
out = ezonsm.predictor.predict(image.tobytes(), target_model='model1.tar.gz').decode()
out
# Tear the endpoint down to stop incurring charges.
ezonsm.predictor.delete_endpoint()
```
| github_jupyter |
```
import pandas as pd
import itertools
# Source spreadsheet with the digitised 1918 "Amtsblatt" inventory.
file = 'legacy_data/Amtsblatt_1918.xlsx'

# SKOS concept scheme that groups the resource-type concepts created below.
res_type_scheme, _ = SkosConceptScheme.objects.get_or_create(dc_title='res_type')

# Archive that every imported resource is attached to.
archiv, _ = Institution.objects.get_or_create(
    written_name='Wiener Stadt- und Landesarchiv',
    abbreviation="WStLA",
    institution_type="Archiv"
)

# Empty cells become the literal string 'False' so the per-column checks
# below can test for "no value" with a plain != 'False' comparison.
df = pd.read_excel(file).fillna('False')
df
for i, row in df.iterrows():
    item = None
    # Compose a unique signature; the trailing [internal-id] (the row index)
    # keeps entries distinct even when the archival fields repeat.
    signatur = ", ".join(
        [
            "Bestand: {}".format(str(row.get('Teil von Bestand', default = "-"))),
            "Signatur: {}".format(str(row.get('(Archiv)Signatur', default='-'))),
            "Punkt/Seite: {}".format(str(row.get('Punkt; Seite', default='-'))),
            "[internal-id]: {}".format(i)
        ]
    )
    # print(signatur)
    try:
        # get_or_create keeps re-runs of this import idempotent per signature.
        item, _ = ArchResource.objects.get_or_create(
            signature=signatur
        )
    except Exception as e:
        print(e)
    if item:
        # Normalised keywords: match each against both preferred and
        # alternative SKOS labels, then attach all hits at once.
        if row['Schlagwörter normalisiert'] != 'False':
            slw = row['Schlagwörter normalisiert'].split(';')
            concepts = []
            for y in slw:
                concepts.append(list(SkosConcept.objects.filter(pref_label=y.strip())))
                concepts.append(list(SkosConcept.objects.filter(other_label__label=y.strip())))
            if concepts:
                # NOTE(review): set() replaces any previously attached concepts.
                item.subject_norm.set(list(itertools.chain.from_iterable(concepts)))
        if row['Titel'] != 'False':
            item.title = row['Titel']
        if row['Zusammenfassung'] != 'False':
            item.abstract = row['Zusammenfassung']
        if row['Kommentar'] != 'False':
            item.notes = row['Kommentar']
        # if row['Datum original'] != 'False':
        #     item.written_date = row['Datum original'].replace('00:00:00', '')
        if row['Datum normalisiert DD/MM/YYYY)'] != 'False':
            try:
                item.not_before = pd.to_datetime(row['Datum normalisiert DD/MM/YYYY)'])
            except Exception as e:
                # Unparseable dates are only logged; the row is still imported.
                print(e)
        if row['Typ'] != 'False':
            # Resource type: create the concept on demand and file it under
            # the 'res_type' scheme.
            type_vocab, _ = SkosConcept.objects.get_or_create(
                pref_label="{}".format(row['Typ'])
            )
            type_vocab.scheme.add(res_type_scheme)
            item.res_type = type_vocab
        if row['Orte'] != 'False':
            # Mentioned places must already exist; misses are only logged.
            for x in row['Orte'].split(';'):
                try:
                    pl = Place.objects.get(name=x.strip())
                except Exception as e:
                    pl = None
                    print(e, x.strip())
                if pl:
                    item.mentioned_place.add(pl)
        if row['Institutionen'] != 'False':
            # Same lookup-or-log pattern for mentioned institutions.
            for x in row['Institutionen'].split(';'):
                try:
                    pl = Institution.objects.get(written_name=x.strip())
                except Exception as e:
                    pl = None
                    print(e, x.strip())
                if pl:
                    item.mentioned_inst.add(pl)
        if row['Personen'] != 'False':
            # ... and for mentioned persons.
            for x in row['Personen'].split(';'):
                try:
                    pl = Person.objects.get(written_name=x.strip())
                except Exception as e:
                    pl = None
                    print(e, x.strip())
                if pl:
                    item.mentioned_person.add(pl)
        item.archiv = archiv
        item.save()
# Person.objects.all().delete()
# ArchResource.objects.all().delete()
```
| github_jupyter |
## Dependencies
```
import json, glob
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras import layers
from tensorflow.keras.models import Model
```
# Load data
```
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
!ls /kaggle/input/
# Folder holding the trained fold checkpoints plus the training config.
input_base_path = '/kaggle/input/81-tweet-train-3fold-roberta-base-lbl-smoothing-01/'
with open(input_base_path + 'config.json') as json_file:
    config = json.load(json_file)
config
# Tokenizer vocabulary/merge files saved alongside the model.
vocab_path = input_base_path + 'vocab.json'
merges_path = input_base_path + 'merges.txt'
base_path = '/kaggle/input/qa-transformers/roberta/'
# One .h5 checkpoint per cross-validation fold; sort for a stable order.
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
```
# Tokenizer
```
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path,
lowercase=True, add_prefix_space=True)
```
# Pre process
```
# Normalise the tweet text: no NaNs, lowercase, no surrounding whitespace.
test['text'].fillna('', inplace=True)
test["text"] = test["text"].str.lower()
test["text"] = test["text"].str.strip()
# Tokenise into the model's fixed-length input arrays.
x_test = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test)
```
# Model
```
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    """Build the span-extraction model: a RoBERTa encoder with two softmax
    heads predicting the answer span's start and end token positions."""
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')

    encoder = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
    # First element of the encoder output is the last hidden state.
    hidden_states = encoder({'input_ids': input_ids, 'attention_mask': attention_mask})[0]

    def span_head(name):
        # Dropout -> per-token logit -> softmax over the sequence axis.
        logits = layers.Dense(1)(layers.Dropout(.1)(hidden_states))
        return layers.Activation('softmax', name=name)(layers.Flatten()(logits))

    y_start = span_head('y_start')
    y_end = span_head('y_end')
    return Model(inputs=[input_ids, attention_mask], outputs=[y_start, y_end])
```
# Make predictions
```
# Average the start/end probability maps over all trained folds.
NUM_TEST_IMAGES = len(test)
test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
for model_path in model_path_list:
    print(model_path)
    # Rebuild the architecture and load this fold's weights.
    model = model_fn(config['MAX_LEN'])
    model.load_weights(model_path)
    test_preds = model.predict(x_test)
    # Equal-weight ensemble: each fold contributes 1/k of the final score.
    test_start_preds += test_preds[0] / len(model_path_list)
    test_end_preds += test_preds[1] / len(model_path_list)
```
# Post process
```
# Convert probability maps to token indices (argmax over the sequence axis).
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
test['text_len'] = test['text'].apply(lambda x : len(x))
test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' ')))
# Sanity clamps: end within the text, start not after end.
test["end"].clip(0, test["text_len"], inplace=True)
test["start"].clip(0, test["end"], inplace=True)
# Decode the predicted [start, end] token span back into the original string.
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
# Fall back to the full tweet when decoding produced nothing.
test["selected_text"].fillna(test["text"], inplace=True)
```
# Visualize predictions
```
display(test.head(10))
```
# Test set predictions
```
# Write the Kaggle submission; the sample file provides the row ids/order.
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
| github_jupyter |
# Inference only Text Models in `arcgis.learn`
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc">
<ul class="toc-item">
<li><span><a href="#Introduction" data-toc-modified-id="Introduction-1">Introduction</a></span></li>
<li><span><a href="#Transformer-Basics" data-toc-modified-id="Transformer-Basics-2">Transformer Basics</a></span></li>
<li><span><a href="#Prerequisites" data-toc-modified-id="Prerequisites-3">Prerequisites</a></span></li>
<li><span><a href="#Inference-only-models" data-toc-modified-id="Inference-only-models-4">Inference only models</a></span></li>
<ul class="toc-item">
<li><span><a href="#ZeroShotClassifier" data-toc-modified-id="ZeroShotClassifier-4.1">ZeroShotClassifier</a></span></li>
<li><span><a href="#QuestionAnswering" data-toc-modified-id="QuestionAnswering-4.2">QuestionAnswering</a></span></li>
<li><span><a href="#TextSummarizer" data-toc-modified-id="TextSummarizer-4.3">TextSummarizer</a></span></li>
<li><span><a href="#TextTranslator" data-toc-modified-id="TextTranslator-4.4">TextTranslator</a></span></li>
<li><span><a href="#TextGenerator" data-toc-modified-id="TextGenerator-4.5">TextGenerator</a></span></li>
<li><span><a href="#FillMask" data-toc-modified-id="FillMask-4.6">FillMask</a></span></li>
</ul>
<li><span><a href="#References" data-toc-modified-id="References-5">References</a></span></li>
</ul>
</div>
# Introduction
The pretrained/inference-only models available in `arcgis.learn.text` submodule are based on [Hugging Face Transformers](https://huggingface.co/transformers/v3.3.0/index.html) library. This library provides transformer models like `BERT` [[1]](#References), `RoBERTa`, `XLM`, `DistilBert`, `XLNet` etc., for **Natural Language Understanding (NLU)** with over 32+ pretrained models in 100+ languages. [This page](https://huggingface.co/transformers/v3.0.2/pretrained_models.html) mentions different transformer architectures [[2]](#References) which come in different sizes (model parameters), trained on different languages/corpus, having different attention heads, etc.
These inference-only classes offers a simple API dedicated to several **Natural Language Processing (NLP)** tasks including **Masked Language Modeling**, **Text Generation**, **Sentiment Analysis**, **Summarization**, **Machine Translation** and **Question Answering**.
The usage of these models differs from the rest of the models available in the `arcgis.learn` module in the sense that these models do not need to be trained on a given dataset before they can be used for inferencing. Therefore, these models do not have methods like `fit()`, `lr_find()` etc., which are required to train an `arcgis.learn` model.
Instead these model classes follow the following pattern:
- A model constructor where user can pass a pretrained model name to initialize the model.
- A `supported_backbones` attribute which tells the supported transformer architectures for that particular model.
- A method where user can pass input text and appropriate arguments to generate the model inference.
# Transformer Basics
Transformers in **Natural Language Processing (NLP)** are novel architectures that aim to solve [sequence-to-sequence](https://towardsdatascience.com/understanding-encoder-decoder-sequence-to-sequence-model-679e04af4346) tasks while handling [long-range dependencies](https://medium.com/tech-break/recurrent-neural-network-and-long-term-dependencies-e21773defd92) with ease. The transformers are the latest and advanced models that give state of the art results for a wide range of tasks such as **text/sequence classification**, **named entity recognition (NER)**, **question answering**, **machine translation**, **text summarization**, **text generation** etc.
The Transformer architecture was proposed in the paper [Attention Is All You Need](https://arxiv.org/pdf/1706.03762.pdf). A transformer consists of an **encoding component**, a **decoding component** and **connections** between them.
- The **Encoding component** is a stack of encoders (the paper stacks six of them on top of each other).
- The **Decoding component** is a stack of decoders of the same number.
<img src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAeAB4AAD/4QCCRXhpZgAATU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAABJADAAIAAAAUAAAAUJAEAAIAAAAUAAAAZJKRAAIAAAADNjkAAJKSAAIAAAADNjkAAAAAAAAyMDIwOjEyOjAyIDE1OjI0OjM4ADIwMjA6MTI6MDIgMTU6MjQ6MzgAAAD/4QGgaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49J++7vycgaWQ9J1c1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCc/Pg0KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyI+PHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj48cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0idXVpZDpmYWY1YmRkNS1iYTNkLTExZGEtYWQzMS1kMzNkNzUxODJmMWIiIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyI+PHhtcDpDcmVhdGVEYXRlPjIwMjAtMTItMDJUMTU6MjQ6MzguNjkwPC94bXA6Q3JlYXRlRGF0ZT48L3JkZjpEZXNjcmlwdGlvbj48L3JkZjpSREY+PC94OnhtcG1ldGE+DQo8P3hwYWNrZXQgZW5kPSd3Jz8+/9sAQwAGBAUGBQQGBgUGBwcGCAoQCgoJCQoUDg8MEBcUGBgXFBYWGh0lHxobIxwWFiAsICMmJykqKRkfLTAtKDAlKCko/9sAQwEHBwcKCAoTCgoTKBoWGigoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgo/8AAEQgBoQJWAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/aAAwDAQACEQMRAD8A+qaKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAI5pEhieSQhUUZJPYVgxatqV6PM07Tgbc/dklfbuHrirXi4lfDt4QcfKB+orRskWO0hVBhQigD04q1ZRuylormP9q1/wD6B9t/3+o+1a//ANA+2/7/AFb9FHMuwc3kYH2rX/8AoH23/f6j7Vr/AP0D7b/v9W/RRzLsHN5GB9q1/wD6B9t/3+o+1a//ANA+2/7/AFb9FHMuwc3kYH2rX/8AoH23/f6j7Vr/AP0D7b/v9W/RS5l2Dm8jA+1a/wD9A+2/7/Ufatf/AOg
fbf8Af6t+ijmXYObyMD7Vr/8A0D7b/v8AUfatf/6B9t/3+rfoo5l2Dm8jA+1a/wD9A+2/7/Ufatf/AOgfbf8Af6t+inzLsHN5GB9q1/8A6B9t/wB/qPtWv/8AQPtv+/1b9FHMuwc3kYBvNeXk6bAw9Fm5NXdH1RdQWRWjaG4iOJIn6qf8K0q5+3GzxldBeA1spPuc0K0k9BrU6CiiioICiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAMJtSu2t7i8ijhNrC7DaSdzKpwTnoKsQ6i8kl+Aq7YFVkPrlc80yTR2IliS6kS1lYs8QUd+oB7A0650pnmle3uXgWZQsihQcgDHGehxV+6VoQNf3zNY+X9nC3S5G5T8p25Pemtq86XrxnyCqzrDsBO85x8w9uf0rSawQyWbISq2udq46gjFLaWMcE1xLwzzSeZkjkcAY/SjmiF0Uv7Uk/suW62LuSfygO2PMC06O5vjqxtibfywgkztOdpJGOvXikOj/vGQXL/AGRpfOMO0ctnPX0zV1bULqButxyYxHtx6EnNF0F10KPi7/kXLz/dH8xWpbf8e8f+6P5Vl+L/APkXbz/dH8xWpb/8e8f+6P5Un8AdCvNdmPUra12585Xbdnptx/jV2s64tpH1mznUDy40kVjnoTtx/KtGiVrKwnYM1Sn1S0gult5ZSsjEAfKcZPQZxirhrltUstTu5po2RmTz0eNxKFQICDjHUng06cVJ+8OKTepsza3p8MzRS3KqytsbIOFPoT0FTWOoW19v+zSbihwwIII9ODWPc6XPJZahGEUvNdLKvPVQV5/Q1oW1rImuXNwQBC8EaKc9SC2f5iqlGFtH/Wg2o20NOmTSLDE0khwijJOOgp9UtZW6bT5BY/6/IxyBkZ5xnvjNZxV3Ylbj7HULa+Dm2ctsOGBUqR+Bq1WHodpcxXt3PcRvGsqoFDyb24znP51uVU0k7Ickk9CleapaWcyxTyESMM7VUsQPU4HA+tV49csjPeRszp9lP7xmQhegPX8ara5bXT3Xm6fBItzs2rOsgC/RlPUVBdWF3Kur2/lArdLvSXcMBtgXBHXtVxjBpXZSjG2pvPdwJMInkCuUMmD/AHR1P61mRa5BcalBb2rq6OjuzEEEAYwRnqOetUbizv8AUrh2mgFshs3gGXDHcSPTtxTns769uoGmthbIltJAW3hjlgORjtxTUIJasajHqa9vq9jOZBFOD5alzwR8o6keo9xTE1uwk3iObcUQvwp+ZR3Hr+FYtnpU4gaOW2lE8ds8KSGfchJXHAz34+laEmnymfTCiqEghdH56ZUAfrQ4U09xOMbmhpeoQ6laLcW+4I3ZlII/OsyH/kc7j/r1X+dXdASaHTIYbmLy3iUR/eBDYHUVSh/5HO4/69V/nUpJOSQrWbsb9FFFZEBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGN4vBPh28x/dB/UVpWbBrWEqcgoCD+FPuIUnhkilG5HUqw9qwItO1iwQQ2F3BJbr9wTqSyj0yKtaqxS1VjoqKwPL8Rf89dP/75ajy/Ef8Az10//vlqOTzDl8zforA8vxH/AM9dP/75ajy/Ef8Az10//vlqOTzDl8zforA8vxH/AM9dP/75ajy/Ef8Az10//vlqOTzDl8zforA8vxH/AM9dP/75ajy/EX/PXT/++Wo5PMOXzN+iuatZtcuoy9tdaZKgZkLIGIDA4I/A1N5fiP8A566f/wB8tS5PMOXzN+isDy/Ef/PXT/8AvlqPL8R/89dP/wC+Wp8nmHL5m/RWB5fiP/n
rp/8A3y1Hl+I/+eun/wDfLUcnmHL5m/RWB5fiP/nrp/8A3y1Hl+I/+eun/wDfLUcnmHL5m/WDAd3jO5x/DaqD7c0nleIjwZ7BR6hGJFXNG0v7D5ss8xnu5jmSQjGfQAdhQrRT1DY1KKKKgkKKKKACiiigAooooAKKKKACjNITiuR1f4gaFp141oss17drw0VnEZSp9CRxWlKjUrO1OLZE6kKavJ2OvzRXBf8ACxof4fD3iBvcWn/16X/hY0f/AELviH/wE/8Ar10fUMR/L+Rl9bpdzvKK4P8A4WNH/wBC74h/8BP/AK9H/Cxo/wDoXfEP/gJ/9ej6hiP5fxX+YfW6Pc7yiuD/AOFjR/8AQu+If/AT/wCvR/wsaP8A6F3xD/4Cf/Xo+oYj+X8V/mH1uj3O8org/wDhY0f/AELviH/wE/8Ar0f8LGj/AOhd8Q/+An/16PqGI/l/Ff5h9bo9zvKK4P8A4WNH/wBC74h/8BP/AK9H/Cxo/wDoXfEP/gJ/9ej6hiP5fxX+YfW6Pc7yiuD/AOFjR/8AQu+If/AT/wCvR/wsaP8A6F3xD/4Cf/Xo+oYj+X8V/mH1uj3O8org/wDhY0f/AELviH/wE/8Ar0f8LGj/AOhd8Q/+An/16PqGI/l/Ff5h9bo9zvKK4P8A4WNH/wBC74h/8BP/AK9H/Cxo/wDoXfEP/gJ/9ej6hiP5fxX+YfW6Pc7yiuD/AOFjR/8AQu+If/AT/wCvR/wsaP8A6F3xD/4Cf/Xo+oYj+X8V/mH1uj3O8org/wDhY0f/AELviH/wE/8Ar0f8LGj/AOhd8Q/+An/16PqGI/l/Ff5h9bo9zvKK4P8A4WNH/wBC74h/8BP/AK9H/Cxo/wDoXfEP/gJ/9ej6hiP5fxX+YfW6Pc7yiuD/AOFjR/8AQu+If/AT/wCvR/wsaP8A6F3xD/4Cf/Xo+oYj+X8V/mH1uj3O8org/wDhY0f/AELviH/wE/8Ar0f8LGj/AOhd8Q/+An/16PqGI/l/Ff5h9bo9zvKK4P8A4WNH/wBC74h/8BP/AK9H/Cxo/wDoXfEP/gJ/9ej6hiP5fxX+YfW6Pc7yiuD/AOFjR/8AQu+If/AT/wCvR/wsaP8A6F3xD/4Cf/Xo+oYj+X8V/mH1uj3O8org/wDhY0f/AELviH/wE/8Ar0f8LGj/AOhd8Q/+An/16PqGI/l/Ff5h9bo9zvKK4P8A4WNH/wBC74h/8BP/AK9H/Cxo/wDoXfEP/gJ/9ej6hiP5fxX+YfW6Pc7yiuD/AOFjR/8AQu+If/AT/wCvR/wsaP8A6F3xD/4Cf/Xo+oYj+X8V/mH1uj3O8org/wDhY0f/AELviH/wE/8Ar0f8LGj/AOhd8Q/+An/16PqGI/l/Ff5h9bo9zvKK4P8A4WNH/wBC74h/8BP/AK9H/Cxo/wDoXfEP/gJ/9ej6hiP5fxX+YfW6Pc7yiuD/AOFjR/8AQu+If/AT/wCvR/wsaP8A6F3xD/4Cf/Xo+oYj+X8V/mH1uj3O8org/wDhY0f/AELviH/wE/8Ar0f8LGj/AOhd8Q/+An/16PqGI/l/Ff5h9bo9zvKK4P8A4WNH/wBC74h/8BP/AK9H/Cxo/wDoXfEP/gJ/9ej6hiP5fxX+YfW6Pc7ykrhP+FjR/wDQu+If/AT/AOvR/wALGj/6F3xD/wCAn/16PqGI/l/Ff5h9bo9zu6K4T/hY0f8A0LviH/wE/wDr0f8ACxo/+hd8Q/8AgJ/9ej6hiP5fxX+YfW6Pc7uiuE/4WNH/ANC74h/8BP8A69H/AAsaP/oXfEP/AICf/Xo+oYj+X8V/mH1uj3O7rJ8VzahB4fvH0e3a41AxlYUUgfMeM8+nX8K5r/hY0f8A0LviH/wE/wDr0jfEWMj/AJF3xD/4Cf8A16PqGI/l/FB9bo9zmvgI+tQx6jb31tI2ntKxWcuDtmBwykZzz/SvYa8Z+HfjNNL0i7hOi6xcb72WXdBb7lGT0Jz1rq/+FjR/9C74h/8AAT/
69H1GvLVR/IPrVJaNnd0tcH/wsaP/AKF3xD/4Cf8A16P+FjR/9C74h/8AAT/69H1DEfy/iv8AMPrdHud3RXCf8LGj/wChd8Q/+An/ANej/hY0f/Qu+If/AAE/+vR9QxH8v4r/ADD63R7nd0Vwn/Cxo/8AoXfEP/gJ/wDXo/4WNH/0LviH/wABP/r0fUMR/L+K/wAw+t0e53dLXB/8LGj/AOhd8Q/+An/16P8AhY0f/Qu+If8AwE/+vR9QxH8v4r/MPrdHud5RXB/8LGj7+HfEH/gJ/wDXo/4WZpcRH9oWGrWKd3ntWCj64zR9QxHSILF0f5jvM0VS0zUrTVLRLrT7iK4t35V42yKujpXI04u0lqbppq6CiiikMKKKKACiiigDiPiTqF2YtO0PTZTBdatN5BmHWOMDLMPfFXok0DwHoAL+Va2yYBcjLyt/Niay/FPzfFLwah6eXdN+SVV1SFNW+K0cF3+8t9NsRPFG3KiRmxux64rtrydOhTgtmrvzd2v0OWnaVScn00/BFofEm1fm38PeI7iPs8djkH9aX/hY8f8A0K3ij/wA/wDsq6fGOmPTpRXn+08jbmZzH/Cx4/8AoVvFH/gB/wDZUf8ACx4/+hW8Uf8AgB/9lXT0Ue08h8xzH/Cx4/8AoVvFH/gB/wDZUf8ACx4/+hW8Uf8AgB/9lXT0Ue08g5jmP+Fjx/8AQreKP/AD/wCyo/4WPH/0K3ij/wAAP/sq6eij2nkHMcx/wseP/oVvFH/gB/8AZUf8LHj/AOhW8Uf+AH/2VdPRR7TyDmZzH/Cx4/8AoVvFH/gB/wDZUf8ACx4/+hW8Uf8AgB/9lXT0Ue08g5mcx/wseP8A6FbxR/4Af/ZUf8LHj/6FbxR/4Af/AGVdPRR7TyDmZzH/AAseP/oVvFH/AIAf/ZUf8LHj/wChW8Uf+AH/ANlXT0Ue08g5jmP+Fjx/9Ct4o/8AAD/7Kj/hY8f/AEK3ij/wA/8Asq6eij2nkHMcx/wseP8A6FbxR/4Af/ZUf8LHj/6FbxR/4Af/AGVdPRR7TyDmOY/4WPH/ANCt4o/8AP8A7Kj/AIWPH38L+KB/24f/AGVdPRR7TyDmMjQPHWkaxfCxAubK+YZW3vITEzfTsa6yvPPidYxz+Fri8QBLyxxcQSjhkZSD/Ku30a6N9pNldkYM8KSY9MqDVJ3Vxp3LmKMUUUygxRiiigAxRiiigAxRiiigAxRiiigAxRiiigAxRiiigAxRiiigAxRiiigDM8Qa5p+gWDXeqXCwxA7RkZLH0AHJNcqPiVav81v4e8STxno8djlT9PmqrqUSav8AFjybweZb6bYrLFG3KiRm+9j1xXZdOlMuMOZXOY/4WPH/ANCt4o/8AP8A7Kj/AIWPH/0K3in/AMAP/sq6eigr2RzH/Cx4/wDoVvFH/gB/9lR/wseP/oVvFH/gB/8AZV09FAeyOY/4WPH/ANCt4o/8AP8A7Kj/AIWPH/0K3ij/AMAP/sq6eigPZHMf8LHj/wChW8U/+AH/ANlR/wALHj/6FbxR/wCAH/2VdPRQHsjmP+Fjx/8AQreKP/AD/wCypG+I0ZH/ACK/ij/wA/8Asq6iimHsjzPwL4pl0DS7q2vPDXiJ5JbuScGKxJG1jkdSOa6X/hY8f/QreKf/AAA/+yrp6KA9kcx/wseP/oVvFH/gB/8AZUf8LHj/AOhW8Uf+AH/2VdPRQHsjmP8AhY8f/QreKf8AwA/+yo/4WPH/ANCt4o/8AP8A7KunopB7I5j/AIWPH/0K3ij/AMAP/sqP+Fjx/wDQreKf/AD/AOyrp6KA9kcx/wALHj/6FbxR/wCAH/2VH/Cx4/8AoVvFH/gB/wDZV09FAeyOY/4WPF38L+KAP+vD/wCyq/onjXRtduzp7JcWt4wOLW9h8tmHsDwfpWxXH/FGyjl8LTX6DZe2BW4gmH3kII7015ClTsiO+tE8FeMtOu9O/c6Rq0v2a5t1+4kp+66jtnvXpI6V5v8AEy4Nz4N
0K8PDPfWkox2yf/r16QOldeJbqUqdWW7uvW2352OKilGpOC20f3hRRRXEdIUUUUAFFFFAHBeJv+Sr+Df+uV3/AOi6hi/5K9qn/YMi/wDQjU3ib/kq/g3/AK5Xf/ouoYv+Su6p/wBg2L/0OuvF/wAOn/h/9ukclH4qnr+iOuooorzDYKKKKBBRRRQAVQ167ksND1C7h2mW3t5JV3DIyqkjNX6ranaJqGm3VnKzKlxE0TMvUBgRkfnTW4zjW8San/wj5u7O4s766aaKNY1gaMDd1HJ6+9WpvGSrHNcwxh7ZNP8AtOzo4k37dp9MHitaPQWNrDBdahcXCQyxyx7lQY2HgcDoart4R09r/U7hjJ5eoQmKWAHCDPUj0JOD9aq6Agu9R1rS9LN1fNZzzTGOKGGJCoWR2AAJycgZ/SmXOs6rpVzNa6ibad3s5bmCWJCoVkHKsCT6jmr7eHjPp0llfajdXMJCiMsEDRlTlWBA5II6mkHhtJZJpr6+ubueS3e1WRwo8tG64AAGT6mi6AoWet6pbyaaNT+yzR6jCXieFChjcJu2kEnIxnn2rLs/Ft/L4VvNUN3ZyTxQrIIVgZRGS2OSTyK6Ox8NRwTW8lze3N21tEYYBJtCxgjBIAA5xxmoIfCoXQ5NJl1G5ls2jEaqyoCgBB4IHt3ovEB/hHWbnVJLyOd7e4igKBLq3UhHJGSvJPI4710dZ2n6TFYX11PbyOqXG0tDxsDAY3D0JGM/StGpl5CCiiikAUUUUAFFFFAHPfED/kStZ/69n/lV7TNZsdC8C6Re6pOILYWsClypOCUGOBVH4g/8iXrP/Xs/8qv6dpNrrXw+0/T79A8E1jEpBHQ7Bgj3FbQ2LjsUR8T/AAkemrKf+2L/AOFL/wALN8J/9BUf9+ZP8K+edQ0rUPCviybTzcPbzxvhJFfYJFP3Tntn8q9T8K69r0pMEa6bqd1GMyWN5ELa5I/2SPlYe9aOKKudp/ws3wn/ANBUf9+JP8KP+Fm+E/8AoKj/AL8Sf4VUg8aaTbyCLxDpFxospON1zbgxE+gdQRXV6fJpOow+bYGzuY/70W1h+lKwHP8A/CzfCf8A0FR/34k/wo/4Wb4T/wCgqP8AvxJ/hXVfYrb/AJ94f++BS/YrX/n3h/74FGgzlP8AhZvhP/oKj/vxJ/hR/wALN8J/9BUf9+JP8K6v7Fa/8+8P/fAo+xWv/PvD/wB8CjQDlP8AhZvhP/oKj/vxJ/hR/wALN8J/9BUf9+JP8K6v7Fa/8+8P/fAo+xWv/PvD/wB8CjQDlP8AhZvhP/oKj/vxJ/hR/wALN8J/9BUf9+JP8K6v7Fa/8+8P/fAo+xWv/PvD/wB8CjQDlP8AhZvhP/oKj/vxJ/hR/wALN8J/9BUf9+JP8K6v7Fa/8+8P/fAo+xWv/PvD/wB8CjQDlP8AhZvhP/oKj/vxJ/hR/wALN8J/9BUf9+JP8K6v7Fa/8+8P/fAo+xWv/PvD/wB8CjQDgvEPxX0Gy0uSfS7lby6UrthKOm4ZGeSOOM1reCfHuk+LV2WTSRXarueCQcj6Hoa1tf8ADena5pklheQhYJCpYxgK3BB6/hU+i6Jp2iWot9LtIraIDHyLyfqepo0A422/5K5rP/XhD/6FXX1yFt/yVzWf+vCH+ddfTN6fwhRRRSLCiiigAooooAralO1tp1zOgBaOJnGemQM1zOma7qcTaOdU+yzQ6pHmNoUKGN9m7BBJyMZ5rqryAXVrNAxIWVChI6gEYrE07wzHay2clxeXN2bOPy7dZNoWPjGcADJx600J36GHpHi65k0W61G4uLa4eOHeLWOFkIYtgAsSQecVpX2oa1pOmie9eynnuJI4II40ZQkjtjk5OQOtWbPwykOlSaZPfXFxYvEYhE4UbQe4IAORT5PDxuNPa0v9RurmP5TGzBFaNlOQwIHXI70E6mfd63qmkTXVpqH2aeY2Ul3BLEhUZTqrAk+o5zS22tanaz2Mep/ZpUv4WeKSFSux1Td
tYEnjHfNXf+EaSZ7mW+vbi7uJrdrUSOFHlo3XaAMZPrS2fhuOKeKa6vLm8kghMMPmBQI1IwSAAOcdzQFmc7beLL5/Ct1qhu7OSdIUfyxAyiMswBySeR9K3/CWsXOqNepO1vcRwMqpdW4KpJkZIwc8joabF4XA0RtKm1G5mtNixorKgKBSCOQOenetPTtKi0+8u5rd3WO5IZoeNisBgsPc96Bq5o0UUUigooooAKKKKACub+I3/Ik6v/1wNdJXN/Eb/kSNX/64mmhS2ZkePP8AknHhv/r4sv6V6gOgry/x5/yTjw3/ANfFl/SvUB0FdVX/AHan6y/Q82n/ABp/L9QooorjOkKKKKACiiigDgvE/wDyVfwd/wBcrv8A9F1V8XSN4b8cW/iG4jZtLurYWdxIoJ8lg2Qxx27Va8Tf8lX8G/8AXK7/APRddzLEk0bRyorxsMMrDIIrrxXwUv8AD/7dI5aCvKp6/ojm7fX9JuIxJDqdk6EZBEy8/rUv9r6b/wBBC0/7/L/jUdx4C8L3EheXRbMs3JIXb/Kov+FdeEv+gJa/r/jXDyI35Cz/AGvpv/QQtP8Av8v+NH9r6b/0ELT/AL/L/jVb/hXXhP8A6Alr+v8AjR/wrrwn/wBAS1/X/Gj2a7hyFn+19N/6CFp/3+X/ABo/tfTf+ghaf9/l/wAarf8ACuvCX/QEtf1/xo/4V14T/wCgJa/r/jRyLuHIWf7X03/oIWn/AH+X/Gj+19N/6CFp/wB/l/xqt/wrrwn/ANAS1/X/ABo/4V14T/6Alr+v+NHs13DkLP8Aa+m/9BC0/wC/y/40f2vpv/QQtP8Av8v+NVv+FdeE/wDoCWv6/wCNH/CuvCf/AEBLX9f8aPZruHIyz/a+m/8AQQs/+/y/40f2vpv/AEELT/v8v+NcT8TtA8JeGPCtzcxaLai7l/c2/X7579ewyab8LtB8JeKPCsFxLo1qbyH91cAgglh369xz9c0/ZrcXKdx/a+m/9BC0/wC/y/40f2vpv/QQtP8Av8v+NVv+FdeE/wDoCWv6/wCNH/CuvCf/AEBLX9f8aXs13HyFn+19N/6CFp/3+X/Gj+19N/6CFp/3+X/Gq3/CuvCX/QEtf1/xo/4V14T/AOgJa/r/AI0ci7hyFn+19N/6CFp/3+X/ABo/tfTf+ghaf9/l/wAarf8ACuvCf/QEtf1/xo/4V14T/wCgJa/r/jRyLuHIWf7X03/oIWn/AH+X/Gj+19N/6CFp/wB/l/xqt/wrrwn/ANAS1/X/ABo/4V14T/6Alr+v+NHs13DkLP8Aa+m/9BC0/wC/y/40f2xpo66haf8Af5f8arf8K68J/wDQEtf1/wAaUfDvwmOmiWv6/wCNHs13DkOY8ba7b61Zt4d0GVb3Ub4iNvJO9YUyNzMRwOK9K0+2Wzsba2T7kMaxj6AYqtpOiabpEZTS7G3tVPXykAJ+prRqkrKyKSseZfG7wl/bGijVbKMG/sAWIC58yPuPw6/nXn/he+sb7S7dNZ806ejCOK+jYifTpOwLdfLPbP8ASvoxlDKQ3IPBB714H4k0STwZ4yleyg8/TL5WYWzDKzp1kh+o+8tWn0Bo7YazqPh2NbfxbEuqaI4xHqkSB/lPTzV/qK0G8E+HdUjS/wBCkewkcZS502bYD+A4P5VgaDqy+GraBjIb7wXff6mZhvayY/8ALN/9nPHtWxceF7vR5TqngW5jjST95Jp7tm3n91P8J+nFIB/2LxrovFjf2muWo6R3a+VMB/vDg/jT4/iBDZuIvEul3+jy9C8kZkiPuHXtWh4Z8YWmrTtY3cb6frEf+ss7jhvqp/iH0rpJYkmQrKiuh6qwyDQBV0zVbDVIRLp95BcoRkGKQNV3iuT1P4faDeTGe3gk0666iaxcwkH6Dj9Kpf2X4y0XnTtVt9Zt1/5Y3yeXIR6Bxx+dAandUVwyePhp7eX4o0e/0l+hl2ebCf8Aga/4V1Ola1purRCTTb23uV/6ZyBsfX0pDNC
ijNFABRRRQAUUUUAFFFFAHm/iyU+GvHUWv3MbnSry2FpPKoz5Lg5BPsa6GDXtJnjDxanZsrDIPnL/AI10c0Mc8TRzIkkbDDKwyCPcVzc/gDwtPIXk0Sz3E5O1do/IU7lxm46Ev9sab/0ELP8A7/L/AI0f2xpv/QQs/wDv8v8AjVb/AIV34T/6Adr+v+NH/Cu/Cf8A0A7X9f8AGi6H7Rln+2NN/wCghZ/9/l/xo/tjTf8AoIWf/f5f8arf8K78J/8AQDtf1/xo/wCFd+E/+gHa/r/jRdB7Rln+2NN/6CFn/wB/l/xo/tjTf+ghZ/8Af5f8arf8K78J/wDQDtf1/wAaP+Fd+E/+gHa/+Pf40XQe0ZZ/tjTf+ghZ/wDf5f8AGj+2NN/6CFn/AN/l/wAarf8ACu/Cf/QDtf1/xo/4V34T/wCgHa/r/jRdB7Vln+2NN/6CFn/3+X/Gj+2NN/6CFp/3+X/Gq3/Cu/Cf/QDtf1/xrP8AEHg/wboujXeo3OiWgit4y5HPzHsOvc4oug9qzZ/tfTf+ghZ/9/l/xo/tjTf+ghZ/9/l/xryr4O2XhnxPFfWup6PaG+ikMqDBGYyeg57Hj6Yr0z/hXfhP/oB2v6/409A9qyz/AGxpv/QQs/8Av8v+NH9sab/0ELP/AL/L/jVb/hXfhP8A6Adr+v8AjR/wrvwn/wBAO1/8e/xpaB7Rln+2NN/6CFn/AN/l/wAaP7Y03/oIWf8A3+X/ABqt/wAK78J/9AO1/X/Gj/hXfhP/AKAdr+v+NF0HtGWf7Y03/oIWf/f5f8aP7Y03/oIWf/f5f8arf8K78J/9AO1/X/Gj/hXfhP8A6Adr+v8AjRdB7Rln+2NN/wCghZ/9/l/xo/tjTf8AoIWf/f5f8arf8K78J/8AQDtf/Hv8aP8AhXfhP/oB2v6/40XQe0ZY/tjTBydRs/8Av8v+Nch431qDXbX/AIRzQJVvdQvWVH8k7lhjyCzMRwOK6hfh34TByNDtR+f+NbelaNp2kRlNMsre1U9fLQAn6nqaLoTqNqxxfxWt1tPB+k26fdiv7VB9AcV6IOgrgvjJ/wAi3Yf9hK2/9CrvR0FddX/dqfrL9Djp/wAafy/UKKKK4zpCiiigAooooA4LxN/yVfwb/wBcrv8A9F10HirxLZeG7SOW88ySWZtkFvEu6SVvRRXPeJv+SreDf+uV3/6LqC5UXnxek88b1stNVoQeQrM3JHvXXinanS/w/wDt0jlo/FU9f0RKPF/imT5ovBsgQ8jfeorfiMUf8JX4t/6E7/yfT/Curorg9obczOU/4Svxb/0J3/k+n+FH/CV+Lf8AoTv/ACfT/Curoo5w5mcp/wAJX4t/6E7/AMn0/wAKP+Er8W/9Cd/5Pp/hXV0Uc4czOU/4Svxb/wBCd/5Pp/hR/wAJX4t/6E7/AMn0/wAK6uijnDmZyn/CV+Lf+hO/8n0/wo/4Svxb/wBCd/5Pp/hXV0Uc4czPP/Emo+JNd0q4s7nwVCWkjZI5Xuo3MRIxuHFJ4Yv/ABHoGk21nbeCoQ8cSpJKl3GhlIGNxwOteg0Ue0YczOU/4Svxb/0J3/k+n+FH/CV+Lf8AoTv/ACfT/Curoo5w5mcp/wAJX4t/6E7/AMn0/wAKP+Er8W/9Cd/5Pp/hXV0Uc4czOU/4Svxb/wBCd/5Pp/hR/wAJX4t/6E7/AMn0/wAK6uijnDmZyn/CV+Lf+hO/8n0/wo/4Svxb/wBCd/5Pp/hXV0Uc4czOU/4Svxb/ANCd/wCT6f4Uf8JX4s/6E7/yfT/Curoo5w5mc9pPjotqUOn+IdLn0e5nO2FpGDxSH0Djofau2rgfiZaR3PgvUWYfvIE8+Jh1VlOciuv0C5a90PTrmT781vHI31Kg1ad1cqLuX6wPGugr4g0SS3RvLu4z51tN0Mcq8qc1v0HpTKPFPDeqLpXmvfwAaJeym01S1YZFpddC2P7jdfbPtXSQy3Pw+uljmd7rwnO37uXO5rInoD6p79qTx1p
0Oi61/bMkQk0fUVFnqsWOADwsv1B71L4XuP7LvW8Ja4y3NlMhbTp5MMJ4f+eZPcgfpVCOk1/w9pPiixia4RWYDfBdQth4/QqwrnV1fXPBrCHxEr6powOF1KJMyRD/AKar3+oqJTc/Du8Ct5tz4TmfhuWaxY9vdP5V6BFJDeWyyRsk0Eq5Ug5Vgf50gI9Nv7XUrSO6sZ457eQZV0bINWq4bUvCF1pV1JqXgy4WzuGO6Wyfm3n/AA/hPuKveHPGVvqN1/ZuqQPpesr961n43+6N0YUDOpZFdSHUMD1BGa5fVvAegX8xnSz+xXXUT2bGFwfX5eP0rqQc0tIDhf7F8X6L82j61FqkK9LbUk+bH/XReSfrTh45uNNOzxToV9p3/TeJfPh+pZeR+VdxSMoYYYAj0NO4rGZo3iDStai36Zf29wO4VxuH1XqK1K5nWfA2gatJ509ikNz/AM97Y+U/5r1/Gss+HvFOjn/iReIBewL9221NN5+m8c4oA7qiuFHjW/0s7fE/h+8tEXhrm1/fxD345ArotG8S6PrQ/wCJZqFvO39wNhv++TzRYLmxRSFgOtGc0hmH4q8TWPhu1jku/MlnmbZBbwruklb0A/rXODxf4pkG6Lwa+09N96in8sVFKgvPi9cmYBxZacvlA87Szcke9dhTNIw5lc5T/hK/Fv8A0J3/AJPp/hR/wlfi3/oTv/J9P8K6uigr2SOU/wCEr8W/9Cd/5Pp/hR/wlfi3/oTv/J9P8K6uigPZI5T/AISvxb/0J3/k+n+FH/CV+Lf+hO/8n0/wrq6KA9kjlP8AhK/Fv/Qnf+T6f4Uf8JX4t/6E7/yfT/CurooD2SOU/wCEr8W/9Cd/5Pp/hUF54h8S3sDQ3fgeKeI9UlvI2U/gRXZUUB7JHl/g1fEPhqGYR+C4JZ5JnkEwuY1ZVY/cBxnArpf+Er8W/wDQnf8Ak+n+FdXRTD2SOU/4Svxb/wBCd/5Pp/hR/wAJX4t/6E7/AMn0/wAK6uikHskcp/wlfi3/AKE7/wAn0/wo/wCEr8W/9Cd/5Pp/hXV0UB7JHKf8JX4t/wChO/8AJ9P8KP8AhK/Fv/Qnf+T6f4V1dFAeyRyn/CV+Lf8AoTv/ACfT/Cj/AISvxb/0J3/k+n+FdXRQHskcp/wlni3/AKE7/wAnk/wq1pHjrzNUh07xBpk+j3c5xCZGDxSH0Djv7V0Ncn8ULVLjwXfyMMSW4WaNx1VlIwRQJ00lck+Mf/It2H/YSt//AEKu9HQV5r8SZ2uvAeh3D/elvLNz9SQa9KXoK66v+7U/WX6HDT/jT+X6hRRRXGdIUUUUAFFFFAHBeJv+SreDf+uV3/6LqGL/AJK9qn/YMi/9CqbxN/yVfwb/ANcrv/0XUMZx8XtTz/0DIv8A0KurF/w6X+H/ANukclH4qnr+iOuooorzTYKKKKACiiigArN8SXEtp4e1O5t22TQ20kiNjOGCkg1pVBfWsV9ZT2twpaGeNo3AOMqRg0IDhpL/AFgeHEltL2+F7NPCiNewIijd1AwOQfXrT5vF9xsvLmEYNvp5d7Zx/q5xJtIPfjP5V0tt4etIYFhaS7mjSRJUE07PtKnjGeg9qcfD2mnUL28NuDLexeVOCfldfcVd0Bj6lJqmi6OLltUe7uLl4oV8yJRHCzsBuGBnAz0JqK/vtS0W8ls5NQe8E1hNcRySRqHidB/sjBBz3HUVsw+GrGOzmtXa6nt5ECeXNOzhAOm3J4xgcjnii28N2EH2gt9onkmiMDSTzM7BD/CCTwKLoDFtNQ1KxfR/tN+17HqULEiSNVaJxHvyNoGR259RWRaa3qv/AAhN5qb3uom7ECsrS26LGGLAEphfm49fWux03w3p9hIJIxNLIsZiRp5WkKJ6Lnp+FRW/hTT4NPlsQ929pIgTynuGZVAOeAelF0BW8F6jeX8l+s1xLdWkLKsUs8Qil3YywKgDgcY4HWuoqnbadb219PdwqyyzqqyfMcNjoceuOM1cqXvoAUU
UUgCiiigAooooA574g/8AIlaz/wBez/yrf8H/APIqaN/15xf+gCsD4hHHgrWc/wDPs4/St/wiCPCujg8EWcQ/8cFaw2Kia9FFFWWVtSsodRsZ7O6QSQTIUdT3BrzKx0tr61uvB2qytFqemET6XdnhjGPuMD7dDXq1cd8QdJuJIbfW9IX/AIm2mHzUA/5ax/xxn1yKaAk8H60ddsbrStbhRdWs/wBzeQOPlcf3wO6msbF18PLzKiS58JzPyB8z2LH+afyp2sRnWtP0/wAZeFv+QlDHuaPp58f8cTD1HOPcV1mh6pY+JtCjuYAslvOpWSJxnaehRh7UCNO1niuoI54HWSGRQyOpyCD0NZniTw7p3iC18nUYNzLzHMnyyRn1VuorkJI7r4eXjSwCS48KzNmSMfM1kxPJH+x/KvQbO5hu7WO4tZFlhkG5HU5BBpDODXUNe8F4TWBLq+hDhb2NczwD/pov8QHqK7bSdSs9Wso7rT7iO4gcZDocj/6xq26hlwQCD1BritU8HTWN5Jqng+5GnXzHdJbHm3n9dy9j7imB21Fcj4e8ZRXN4NM123bStZHHkyn5JfeNujD9a67NIAoxRRQAm0elc7rPgvQdWYvc6dEk/UTwfu5AfXK966OigDyTxxoXiHw/4enTQtY1PUILj9x9jki86QA9w45GK2vhTqPiWTTvsXibTbmLyl/dXUowWH91h1z74r0GincDzu2/5K5rP/YPh/nXX1yNuMfFzWc99PiI/wC+q66mb0/hCiiikWFFFFABRRRQBV1WV4dLu5Ym2yJEzKfQgcVyem6jqdmfD73V+17HqkfzpJGqtE3l78qVA47cjvXZXEKXFvJDKMxyKUYZxkGsnS/DWn6bJHJCJpZIk8uJp5Wk8tfRcnj8KaJab2OV0nXtRj8NXWqTz3886wblFxAqQbi2AVIAJArW1R9U0XS45Tqkl3c3csVurSRIFiZ2ALAADjnofatOx8M2Fnby26tcy20kZiME07OgU9gD0oi8NWKWU1pK11cW0ihdk87OEA6bcngj1HPFAWZiajqGpaJd3Vk+oPeB9Omuo5ZEUPE6Y/ujBBz3FPh1DUtOudLiuL5r2PULd2/eRqrxOqbsjaACO3IrZt/DVhClyG+0TvcRGB5J5mkcIf4QSeBzS6d4csLGTzEWaaXyzErzzNIUQ9lyeB9KBWZx9vrWqDwVdak17qRufIjcPNboqAlgCUwvPXvXR+C7+7vjf+dcSXNpFIqRSzRCOQtj5gy4HQ+wqe38K2EOnyWW+7ktXVV8qSdmVQDkYyeOladtp1vbX1xdQqyyXAUSDd8p2jAOPX3o0Gk7lyiiikUFFFFABRRRQAVzfxG/5EnV/wDrga6Sua+I5x4I1fP/ADxP9KaFLZmT48/5Jx4b/wCviy/pXqA6CvL/AB6MfDnw4D1FzZD+VeoDoK6qv+7U/WX6Hm0/40/l+oUUUVxnSFFFFABRRRQBwXif/kq/g7/rld/+i6Txrp+oab4htvE+k2z3oWA215bJ99o85DL6kGl8Tf8AJV/Bv/XK7/8ARdd4K68V8FL/AA/+3SOWhrKp6/ojzhfiRoOP3v22Ju6vauCP0pf+FkeHv+et1/4DP/hXopjQ9VB/Ck8pP7q/lXFyROjlPO/+FkeHv+et1/4DP/hR/wALI8Pf89br/wABn/wr0Tyk/ur+VHlJ/dX8qOSIcp53/wALI8Pf89br/wABn/wo/wCFkeHv+et1/wCAz/4V6J5Sf3V/Kjyk/ur+VHJEOU87/wCFkeHv+et1/wCAz/4Uf8LI8Pf89br/AMBn/wAK9E8pP7q/lR5Sf3V/KjkiHKed/wDCyPD3/PW6/wDAZ/8ACj/hZHh7/nrdf+Az/wCFeieUn91fyo8pP7q/lRyRDlPOv+Fk+HR/y1uv/AZ/8KX/AIWT4d/563X/AIDP/hVz4va7/YHhKf7LGTd3X7mMqv3AR8zfgP5034O65/b3hGBbqMi7tAIXLD74A+VvxH8
qPZxFylX/AIWR4e/563X/AIDP/hR/wsjw9/z1uv8AwGf/AAr0Tyk/ur+VHlJ/dX8qOSI+U87/AOFkeHv+et1/4DP/AIUf8LI8Pf8APW6/8Bn/AMK9E8pP7q/lR5Sf3V/KjkiHKed/8LI8Pf8APW6/8Bn/AMKP+FkeHv8Anrdf+Az/AOFeieUn91fyo8pP7q/lRyRDlPO/+FkeHv8Anrdf+Az/AOFH/CyPD3/PW6/8Bn/wr0Tyk/ur+VHlJ/dX8qOSIcp53/wsjw9/z1uv/AZ/8KP+FkeHv+et1/4DP/hXonlJ/dX8qPKj/uL+VHJEOU8r1bU7jx5CNH0Kzuk0+V1+2Xs0ZjVUByVXPUmvUraJIII4YxhI1CqPQAVIoAGAAB7UtUkkrIaVgooooGFIRxS0UAee2Y/4Q3xm1o3y6HrTmSE9FguMcr7Bu1JrEb+CPED63aKToV84GoQqOIXPSUD09a6rxdokPiDQ57GY7WYbopB1jkH3WH0NZXgzVP7f0S503WolOpWhNrfQuOGPTd9GFMR06mG8tQfkmglTPqrKR+orgJoLr4e3rXNmslx4VmfM0ABZrJj1Zf8AY9u1S+HLiXwfrg8Oai7NpdwS2mXLnp6wsfUdq76SNZY2SRQyMMFSMgigCOyu4L61jubSVZYJFDI6nIIqevObq3uvh/eveWCSXPheZs3FsvzNaE/xp/seo7V31he29/ZxXVnKs1vKu5HU5BFIZT8QaDp2v2X2bU7ZZVHKN0aM+qnqDXIi41/wQdt75ut6AvS4UZuLdf8AaH8QHr1r0KkIzweRQBR0XVrLWbJLvTbmO4gYfeU9PYjsav1xWseDHhvX1XwpcjS9TY5kQDMFx7Onb6ipNC8ZK96uleI7Y6Tq/RUkP7qb3jbofpTA7GijOaKQBRRRQBwHjSw1DSvEkHifSrZ7xBAba8to/vlM5Dr6kVWX4kaDj94b2Nu6vauCP0r0imeWndV/KncpSa2PO/8AhZHh7/nrdf8AgM/+FH/CyPD3/PW6/wDAZ/8ACvRPKT+6v5UeUn91fyouh+0ked/8LI8Pf89br/wGf/Cj/hZHh7/nrdf+Az/4V6J5Sf3V/Kjyk/ur+VF0HtJHnf8Awsjw9/z1uv8AwGf/AApP+FkeHv8Anrdf+Az/AOFei+Un91fyo8pP7q/lRdB7SR53/wALI8Pf89br/wABn/wo/wCFkeHv+et1/wCAz/4V6J5Sf3V/Kjyk/ur+VF0HtJHnf/CyPD3/AD1uv/AZ/wDCj/hZHh7/AJ63X/gM/wDhXonlJ/dX8qyfFepRaFoF5qDRbzChKIFyWY8KPzxRdB7SRyA+JPh09Jrn/wAB3/wpf+FkeHv+et1/4DP/AIVj/AvxRPqiX2napvecSNcRSMvBBPzLn2Jz+NeueUn91fypuwKpI87/AOFkeHv+et1/4DP/AIUn/CyPD3/PW6/8Bn/wr0Xyk/ur+VHlJ/dX8qV0HtJHnf8Awsjw9/z1uv8AwGf/AAo/4WR4e/563X/gM/8AhXonlJ/dX8qPKT+6v5UXQe0ked/8LI8Pf89br/wGf/Cj/hZHh7/nrdf+Az/4V6J5Sf3V/Kjyk/ur+VF0HtJHnX/CyPD3/PW6/wDAZ/8ACl/4WR4e/wCet1/4DP8A4V6J5Sf3V/Kjyk/ur+VF0HtJHnX/AAsjw92luj/27P8A4Vn6rqNx49jXR9CtLlNOkdTeXs8ZjVUBB2rnqTXqvlIOir+VOAC9AAKLic21Y8/+Lkaw+FdNjjGETUbZQPQBq9BHQVwXxj/5Fuw/7CVt/wChV3o6Cuur/u1P1l+hyU/40/l+oUUUVxnSFFFFABRRRQBwXib/AJKv4N/65Xf/AKLrX8YeKU0AW1vbW73uqXZ229rGcFsdWJ7AVkeJv+Sr+Df+uV3/AOi6gZRN8YLsyc+RpaCPP8OX5rrxX8Ok/wC7/wC3SOWh8VT1/RCjUPiHL8y2vh2EH+CR5WI/EcUv2z4if88/DP5zV1t
Fef7Rm3MzkvtnxE/55+Gfzmo+2fET/nn4Z/Oautoo9ow5mcl9s+In/PPwz+c1H2z4if8APPwz+c1dbRR7RhzM5L7Z8RP+efhn85qPtnxE/wCefhn85q62ij2jDmZyX2z4if8APPwz+c1H2z4if88/DP5zV1tFHOw5mcPq0fj/AFTTbqynTw0sVxG0TMrTZAIwccU3Ro/H+laXaWNunhsxW8SxKztLkgDjPvXdUUe0YXZyX2z4if8APPwz+c1H2z4if88/DP5zV1tFHtGHMzkvtnxE/wCefhn85qPtnxE/55+GfzmrraKOdhzM5L7Z8RP+efhn85qPtnxE/wCefhn85q62ij2jDmZyX2z4if8APPwz+c1H2z4if88/DP5zV1tFHtGHMzkvtnxE/wCefhn85qPtnxE/55eGT+M1dbRR7RhzM5S18Y6xpWoW1t4w063t4LlxHHe2jlog56BgeRn1r0AciuE+JMCXHgjVhIAdkPmL7FTkH9K6nw1O9z4e0yeU5eS2jdj6kqK0i7q5UXc0qKKKZQUUUUAFcF40hfw5rlt4rslPkDEGpRr/ABxHo/1Wu9qK6t47q3lguEWSKRSjqw4IPUUAY/iHSLPxToJgdgUkUSQTp1jbqrqazfA2u3Nw0+ia3hNasAFk/wCm8f8ADIPr3ql4HuJNA1e68J37krEDPp8rf8tISfu/VT+lXvHOg3F4sGsaLiPXNPy8J7TL3jb1Bp+QjrJEV0ZHUMrDBBGQRXnl7aXXgC9kv9Kje48NzNuurNeTak9XjH931FdZ4U1638Q6THeQZSQEpNC33opB95TWuyh1IYAqRgg96QyDTb631Kyhu7KZJreZdyOpyCKs153f2V34Dv5dT0iJ5/D0zbryxTk25PWSMenqK7rTb+21Kxiu7GZZreVdyOp4NAFqs3XdE0/XbJrTVLZJ4jyNw5U+oPUGtKigDz3Ov+CDz52uaAv43Nsv/s4FdhoWt2Gu2K3Wl3KTxHg46qfQjqDWkeRXHa74MD3rar4buDpWsdSyD91N7OnQ/WmB2NFcVo/jPyLsab4stxpWogfLIx/cT47o/wDQ10uh6xZa5YC802YTW5ZkDDjkHBpWAyfGHildBNta2ls99q12SLe1Q4JA6sx7AVhf2h8Q5PmW18Owg8hHeVmH1I4pFAn+L+os/Jt9OjVM/wAOW5rr6o0hBSV2cj9s+Iv/ADz8M/nNR9s+Iv8Azz8M/nNXXUUFeyRyP2z4i/8APPwz+c1H2z4i/wDPPwz+c1ddRQHskcj9s+Iv/PPwz+c1H2z4i/8APPwz+c1ddRQHskcj9s+Iv/PPwz+c1H2z4i/88/DP5zV11Jmi4ezRyX2z4i/88/DP5zUNdfERhgxeGfzmrrc0tAezied+HNN8d+H7SW3s18POkkzzkyNKTuY5I4A4rW+2fEX/AJ5+GfzmrrqTNFw9lE5L7Z8Rf+efhn85qPtnxF/55+GfzmrrqKA9mjkftnxF/wCefhn85qPtnxF/55+GfzmrrqKA9kjkftnxF/55+Gfzmo+2fEX/AJ5+GfzmrrqKA9kjkftnxF/55+Gfzmo+2fEX/nn4Z/OauuooD2SOR+2/EQdYvDX5zU+z8Y6vpeoW9r4w063tobhxHFe2rlot56BgeRn1rq65b4nQpP4H1QOM7EEin0IIINApU0ldC/GM58N2H/YStv8A0Ku9HQV5n8RZmuPh/oEznLyXVmxPucV6YOgrqq/7tT9ZfocNP+NP5fqFFFFcZ0hRRRQAUUUUAcF4m/5Kv4N/65Xf/ouoYv8Akr2qf9gyL/0I1N4m/wCSr+Df+uV3/wCi6hT5fi9qWe+mRkf99114v+HT/wAP/t0jko/FU9f0R11FFFeYbBRRRQIKKKKACsrxVI8PhjVpInZJEtJWVlOCCEOCDWrUc8MdxBJDOiyRSKUdGGQynqDQgPPriC9HhmBYWvbC4ubi3VZmvDMTnvyeB7VHceJb8/2m6lo76z00rND1EcofBfH
05B9K7i00PTLSPZbWUEa71fCrxuXofwqcadZi8muvs0X2iZBHJJtGXX0PqKvmQ7nJ6rb/ANkeHRcWOo3byXLQxS3MlwX2q7gNIATgHB7VBqRk0bUZ7TT7y6kgm02eZ1kmaQxuoG1wx5Gc11VtoOl20cyQWFvHHMu2RAgww9KdZ6JpllHLHa2UESTLtkCp95fQ+1HMgucnZvLptxoYtb65m+3W7NcQyzGTbiPd5gySRzx+NZVrJeR/D28vmF7HO9upW4N6zlyWGSFz8pr0LT9F03TyxsrG3gLLtYogBI9PpUUHh7SLeKWOHTrZI5VCuoThgDnB/EUcyC5j+BpLhrrVEke5S3jZES2upfMljO3JOcng5GOT3rragS0gjunuEiRZ3UI0gHLAdAanqW7u4gooopAFFFFABRRRQBz3xB/5ErWf+vZ/5Vv+EP8AkVdG/wCvOL/0AVz/AMQ2CeCdYLHA+zsPz4rofCalPC+kKwwVtIgf++BW0PhLia1FFFUWFFFFABRRRQBynxA0SbUdPhvtLO3WNOfz7Vv7xHVD7EcVp+Fdbg8QaHb6hb/LvGJI+8bjhlP0NbBGa8/mH/CG+NhMvy6Hrcm2QfwwXP8Ae9g386Yg8S203hPXD4l0yNn0+chdTtkHb/nsB6jvXc2NzDe2kVzayLLBKodHXowPSpJo1miaN1V43GGUjIINef6W7+BvEC6RcsT4fv3JspWPFvIesRPoe1Az0J1DKVYAgjBB7157qVhd+Br+XVdEie40KZt95YJ1h9ZIx/MV6HmggEYPIpAVNJ1G11axivLCZJreVdysv+etW6891TTbzwVqEusaBE0+jynfe6cnOz1kjHr6iu10fU7TV9PivdPmWa3lGVZf5H3oAu0UUUAc5468OHxRoMmnCeO3LsD5rRCQrjnj0PvXPfDXwXq3g26uYZL+C70ucbtoBVkcdCB05HXn0r0SinfoB53a/wDJXNZ/7B8P/oVdfXIQfL8XNXB43afER7/NXX0zen8IUUUUiwooooAKKKKAKessyaReuhKusLkEHkHBri9Lkm0+Tw0bW9uZjqMObmGWYyD/AFe7eM5K849ua76RFljZJFDIwKlT0INUdP0XTdOYtY2Nvbuw2lo0AOPTNMlps4PSbm/tfB11qSx3S3BtyVuZbsyg5bBYITxgc9O1a2tW39j6FHJYahds11LBDNcvcGTajsA0gzwDz1HHNdJZ6Fpdk0htLGCLzFKvtTG4HqDSW2g6VawzRW9hbpFMu2RQgww9MelAcrOU1ZpdG1C9srC8uZLeTS5rhlkmMhideFYMTkZyfy4p1vJLpl9pMVne3MyX1pI88Msxk2YTcHBPK8nHXHNdXaaJplnDNFbWMEccy7ZAqj5x6H2pbDRdN0/f9hsbeAuu1iiYLD0PtRcXKzz+B7yPwBc3pW9jnkgiInN8ZDJlhkqN3yn/ABrpfA8k7zaoryXC28cqpHbXUvmSxHbkknJ4Ocjk1rW/h7SbeKSKHT7dI5AA6heGwcgH8avR2sEd1JcJEizyKFdwMFgOgNA1Fk9FFFIoKKKKACiiigArm/iN/wAiTq//AFwNdJXNfEhgvgfV8nH7nH600KWzMnx5/wAk48N/9fFl/SvUB0FeYePlK/Dnw6pGCLmyB/SvTx0FdVX/AHan6y/Q82n/ABp/L9QooorjOkKKKKACiiigDgvE3/JV/Bv/AFyu/wD0XU3jXQtR/ta08Q+HQkmo28Zhmt3OFniPOM9iDyKh8Tf8lX8G/wDXK7/9F13grrxXwUv8P/t0jlofFU9f0R50PGOoRjbc+D/ESyDqIrcSL+eaP+E0uv8AoUPE/wD4B/8A169Gori5UdHKjzn/AITS6/6FDxP/AOAf/wBej/hNLr/oUPE//gH/APXr0aijliHIec/8Jpdf9Ch4n/8AAP8A+vR/wml1/wBCh4n/APAP/wCvXo1FHLEOQ85/4TS6/wChQ8T/APgH/wDXo/4TS6/6FDxP/wCAf/1
69Goo5YhyHnP/AAml1/0KHif/AMA//r0f8Jpdf9Ch4n/8A/8A69ejUUcsQ5DzeTxvPFE8kvhPxKiICzM1oAAPU80Q+OZpoklh8J+JXjcBlZbMEMD0I5q98Y5dVHg26i0eBpPMB+0yBgPLiAyx5Pfp+dM+DEuqt4MtotXt2RYwDbSlgfMiIyvQ9un5U+RWuHKit/wml1/0KHif/wAA/wD69H/CaXX/AEKHif8A8A//AK9ejUUuWIch5z/wml1/0KHif/wD/wDr0f8ACaXX/QoeJ/8AwD/+vXo1FHLEOQ85/wCE0uv+hQ8T/wDgH/8AXo/4TS6/6FDxP/4B/wD169Goo5YhyHnP/CaXX/QoeJ//AAD/APr0f8Jpdf8AQoeJ/wDwD/8Ar16NRRyxDkPOf+E0uv8AoUPE/wD4B/8A16P+E0uv+hQ8T/8AgH/9evRqM0csQ5UeX3lvrvjdorGfSp9H0TeHuHuSBLKAc7Ao6AmvTo0WONUQYVQAB6CnZoppW2GlYKKKKYwooooAKKKKACszxHo9vrujXWn3Q+SZMBu6N2Ye4NadFAHI/D7WJ7q0uNJ1UgavpbeRMD1kX+GQexFbniHR7XXdJuLC+XdFKMAjqjdmHuDXL+PLSfSNQtfFmmozS2g8q9iX/lrbk8/ivWuysLuC+sobq0dZIJkDoy9CD0p+YHJeB9Yure6m8Na8+dVsl/dTH/l6h7OPfsa7UVy/jfw9Jq9rDeaa4g1qxPm2k3v3Q/7J6VZ8G+Io/EGl+ayGC9hbyrq3P3opB1B9vSgDfIyMGvPtY0y78HajLrXh6Fp9MlO+/wBOXt6yxj19RXoNHWkBR0XVbTWdPivdPmWW3lGQR1HsR2PtV6vP9Y0i98JajLrnhuIzWMp3X+mr0b1kj9G9R3rsNC1ez1vTIr7TpllgkGQR1B7gjsaANCiiigDiPGmhakutW3iLw6qTX8EZgmtpG2ieLOcA9iDVBfGOoIMT+D/EQkHXyrcSL+BzXo1IOadylJrY87/4TS6/6FHxP/4Bf/Xo/wCE0uv+hR8T/wDgF/8AXr0Sii4/aSPO/wDhNLr/AKFHxP8A+AX/ANej/hNLr/oUfE//AIBf/Xr0Sii4e0ked/8ACaXX/Qo+J/8AwD/+vR/wml1/0KPif/wC/wDr16JRRcPaSPO/+E0uv+hR8T/+Af8A9ej/AITS6/6FHxP/AOAX/wBevRKKLh7SR53/AMJpdf8AQo+J/wDwC/8Ar0n/AAml1/0KPif/AMA//r16LWT4qm1C30C8fR7drjUDGVhRSB8x4zzxx1/Ci4ueRxlv48kuUL2/hbxHKgYqWS0BAIOCPvdQRUv/AAml1/0KPif/AMAv/r1hfAN9Zhj1G3vreRtPaVmWcuDtlBwynnPPH5V7DTYKcjzv/hNLr/oUfE//AIBf/Xo/4TS6/wChR8T/APgF/wDXr0SilcftJHnf/CaXX/Qo+J//AAD/APr0f8Jpdf8AQo+J/wDwC/8Ar16JRRcPaSPO/wDhNLr/AKFHxP8A+AX/ANej/hNLr/oUfE//AIB//Xr0Sii4e0ked/8ACaXX/Qo+J/8AwC/+vR/wml1/0KPif/wC/wDr16JRRcPaSPOv+E0u+3hDxP8A+Af/ANeql3ba744eKyudKn0fQxIr3DXJAlmAOQoXtXqFFFxObejOB+MChPDGnqowq6lbAAdhurvx0FcF8ZP+RbsP+wlbf+hV3o6Cuur/ALtT9ZfoclP+PP5fqFFFFcZ0hRRRQAUUUUAcF4n/AOSr+Dv+uV3/AOi6teNPEN/BqVpoXh5Izq10pkaWQZW3iHG8juc9BVXxN/yVfwb/ANcrv/0XUKjf8X9RLfw6ZGB7ZeuvFaU6T/u/+3SOWj8VT1/REf8Awi+vTfPdeMdTEh6+Qiov4Cj/AIRHVv8Aocta/wC+lrsqK8/nZrdnG/8ACI6t/wBDlrf/AH0tH/CI6t/0OWtf99LXZUUc8u4XZxv/AAiOrf8AQ5a1/wB
9LR/wiOrf9Dlrf/fS12VFHPLuF2cb/wAIjq3/AEOWtf8AfS0f8Ijq3/Q5a3/30tdlSOwRCzsFVRkknAAo55Duzjv+ER1b/octb/76Wj/hEdW/6HLWv++lrqUv7N0Lpd27KCAWEgIBPQdetT+YvmbNy78btuecetHPILs4q68E6jdW8kFx4u1mSGRSjoxXDKeCDRa+CdRtLaO3tvF2sRQxKERFK4UDoBXXw3trMziG5hkMf3wjg7fr6UsN3bzo7w3EUiJ95kcEL9aOeQXZyn/CI6t/0OWtf99LR/wiOrf9Dlrf/fS11lvdQXCk288UoXqUcNj8qjTULN1dku7dlQZYiQHaPejnkF2cv/wiOrf9DlrX/fS0f8Ijq3/Q5a1/30tddBPFOm6CVJFzjKMGH6VJRzy7iuzjf+ER1b/octb/AO+lo/4RHVv+hy1r/vpa7Kijnl3C7ON/4RHVv+hy1v8A76Wj/hEdW/6HLWv++lrsqKXPILs43/hEdW/6HLWv++lo/wCER1ft4y1rPuVrsqKOdhdnDT6j4h8Fyw3OqagNY0NnEczvGFlgycBsjqM9a9NRg6KynKsMg1xHxFQP4J1gMAR9nZvy5ro/CjmTwzpDuSWa0iJJ/wBwVpF3Vy4s1aKKKooKKKKACiiigAooooAZNGksTRyKGRgVZSMgiuE8JyN4X8ST+F7lj9hmzcaY7HIC9Wiz6jtXfVzXjvQX1vSQ1kwi1OzYXFpN0KyLzjPoelNAdKRmuD8Y2NxoGrf8JVo0ZfaNuo2y/wDLaL++P9pa3/Buup4g0OK72+XcrmO4hPWKVeGUitxlDKQwBBpAVtLv7fVLCC8spFlt5lDow9KtV53GW8A+IRExI8MalL8hzxaTnsfRDXoanK5HIPSgBSMjmuB1zR7zwvqcuveGImlt5Duv9OXpKO8iDsw/Wu+ooAztB1mz13TYr7TpRJDIPoVPcEdjWjXnvifTZ/CV5P4l0DaLY/PqFiW2pKP76dg3860vAPji18YtqH2WF4RbOoUOfmdSPvfmDTt1AZ408Q6hDqlroPh5YzqlyhleaUZS3jzjcR3PoKy/+EW12b5rnxlqYkPXyVVF/AdqfB8/xc1YtyU06ID2y1dfTNYRTV2cb/wiOrf9DnrX/fQo/wCER1b/AKHPWv8AvoV2VFFyvZx7HG/8Ijq3/Q561/30KP8AhEdW/wChz1r/AL6FdlRRcPZx7HG/8Ijq3/Q561/30KP+ER1b/oc9a/76FdlRRcPZx7HG/wDCI6t/0Oetf99Cj/hEdW/6HPWv++hXYsQqlmIAAySe1RW91b3AJt54pQOuxw2PyouHJHscn/wiOrf9DnrX/fQo/wCER1b/AKHPWv8AvoV1UF7azuUguYJGAyQkgYj8qIb21m3+TcwyeX9/a4O36+lAckTjNP8AAN5p0Tx2PivVoI3cyMqFQCx6n61Z/wCER1b/AKHPWv8AvoV1cN3bTRtJDcQyRr95lcED60sF1BcKWgnilUdSjBgPyo1Dkicn/wAIjq3/AEOetf8AfQo/4RHVv+hz1r/voV1CahZOrsl3bsqDLkSAhfrU0E0U6b4JEkTpuRgw/SjUOSJyP/CI6t/0Oetf99Cj/hEdW/6HPWv++hXZUUXD2cexxv8AwiOrf9DnrX/fQo/4RHVv+hz1r/voV2VFFw9nHscb/wAIjq3/AEOetf8AfQo/4RHVv+hz1r/voV2VFFw9nHscb/wiWrjp4z1nPbJFQzal4g8GTQXGrX/9saG8ixSyPGFlt8nAbI6j1ruK5n4lIH8D6uGGcQ5/EEGgUoK2hH8YWD+GdPZTkHUrYg/8Crv16CvMPH7F/h34dZjktc2RJ/KvTx0FdVX/AHan6y/Q4Kf8efy/UKKKK4zpCiiigAooooA4LxN/yVfwb/1yu/8A0XUMX/JXtU/7BkX/AKEam8T/APJVvBv/AFyu/wD0XUAOz4v6gG6yaXGV98PXXi/4dP8Aw/8At0jko/FU9f0R19F
FFeYbBRRRQIKKKKACsjxeM+FNZAGc2cwx/wAANa9BGRg9KadhnmP2Swv/AAxBaRS6fMJbq2SX7FD5RXP97k5PXmmXDarLc61Yusp1K00swiRBgzLvyGU+pX9a9QCKOigd+lAUA5AGafOB59qI0WfwyV0CCD5fJN3HBFiTyQ43q2BnOOoPNRXg0+e/upPDkUQs10udblrePZGWx8gOMAt1969GCKOigZ68UBFUYCgD2FHMB5xpC2EtzoreH4UV47VxfSQx7UK+XgKxHBbdj3rIsUtT8Or5IJNOkujbIHjhg2yr84/1hyd35CvXlUL0AH0pBGg6Kvp0p8wHIeB7X7Fqmrw3EcFtdN5beRbpsiKbeHUep5B+ldjSYGc4GaWpbuIKKKKQBRRRQAUUUUAc98Qf+RK1n/r2f+Vb/g//AJFTRv8Arzi/9AFc78RpFi8D6wzkAGAqCfU8D+ddL4WjaHw1pMbgh0tYlIPYhRW0PhLialFFFUWFFFFABRRRQAUUUUAFFFFAHn2sA+D/ABimsJldG1Z1ivQOkU38MnsD0NegKQy5HI7GqWuabb6vpVzYXiB4J0KMPT0I9wea5n4falcIt14e1ZydT0w7A5P+uh/gcfhxTEdPrGmW2r6ZcWF9GJLeddjg/wAx7iuT8G6ldaRqT+FdbkL3EK7rG4b/AJeIewz/AHl713Nc7418O/2/p6fZ38jUrVvOtLgdY5B/Q9DSGdFRXN+CfEJ1uxkivU8jVbNvJvID1V/UexxkGukoAp6tpdnq9i9nqUC3Fs/3o26Gua8M+AdM8Ma5LqGjS3EKTIY5Ldm3IR1GO/H1rsaDQB53a/8AJXNZ/wCwfD/6FXX1yEX7v4vaqrcGTTomX3w1dfTN6fwhRRRQWFFFFABRRRQBS1wZ0a+H/TB//QTXA6CtlLP4bOgxIJ44P9Pkgj2rs8v7rkDBO7HvXpfXrSKoXoAPpTTE1c8q0NFfwbex6fLZPqBtTuitoNlwBu+YM2ck4yOgrV1YaNN4dH9gQW5jSSE3sdtFhzAHG5WAGenUda79UVfuqo+goCKM4VRnrgUXFynm98LGa81KTw3HEtgulTLctbx7Y2f+AccFsZ+lP04WUt/pb+HYUVY7OQX8kEe1CCg2qxxgtu/HrXowVQMAAD0xQqgdAB9BRcOU8ks0tn+Hl1HbSabJc/ZoQ8cFvtkX514kOTn8q6zwNb/Y9R1eGeOG2uyyMYLdNkWzbhXUe/f3FdcEUZwq89eKXAznAz60XBR6i0UUUigooooAKKKKACub+I3/ACJOr/8AXA10lcx8SpFj8D6sXOMxbRn1JA/rTQpbMy/Hn/JOPDf/AF8WX9K9QHQV5j8QUaL4e+Ho3GGS6slI9CMV6cOgrqq/7tT9ZfoebT/jT+X6hRRRXGdIUUUUAFFFFAHBeJv+Sr+Df+uV3/6Lq/4y8M3OpXVrq2iXCWus2gKozjKSoeqN7f4mqHib/kq/g3/rld/+i67wV14r4KX+H/26Ry0Piqev6I86GpeN4/lk8LW0jDqyXygH86X+1fGn/Qoxf+B6V6JkUZHrXHyrsdHKjzz+1fGn/Qoxf+B6Uf2r40/6FGL/AMD0r0PI9aMj1pcsewciPPP7V8af9CjF/wCB6Uf2r40/6FGL/wAD0r0PI9aMj1o5Y9g5Eeef2r40/wChRi/8D0o/tXxp/wBCjF/4HpXoeR60ZHrRyx7ByI88/tXxp/0KMX/gelH9q+NP+hRi/wDA9K9DyPWjI9aOWPYORHmt5r/i+ytZrm68KwxwQoXdjfpwAMmksfEHi6/s4bq08KwyW8yB43F+nzKehrS+MVpqd94Mu4tLkhjiVTLcs7EExqM7Rx3xTPgza6nZeDbWPU5IpIHUS2pViSI252nI7E/rT5I2vYOVEH9q+NP+hRi/8D0o/tXxp/0KMX/geleh5HrRketLlj2DkR55/avjT/oUYv8AwPSj+1fGn/Qoxf8Ageleh5HrRketHLHsHIjzz+1fGn/
Qoxf+B6Uf2r40/wChRi/8D0r0PI9aMj1o5Y9g5Eeef2r40/6FGL/wPSj+1fGn/Qoxf+B6V6HketGR60csewciPPP7V8af9CjF/wCB6Uf2r41/6FGL/wAD0r0PI9aMj1p8sewciPN/7B8ReKrqBfE0VvpukwusjWkMnmPOQcgM3THtXpCgKoA6AYFHFLTGlYKKKKBhRRRQAUUUUAFFFFABRRRQAGuJ+INhcWklr4m0pN1/po/exqOZ4D95fw6iu2pGUMCGAIPUGgCppOo2+q6db3tm4e3nQOjD37fWrlef6Ex8H+LZdDlJGkakzT2DHpHJ/FF/UV6AKAOI8a6TdWF9H4n0FC19bLtuoB/y8w9x/vDtXT6DqtrrWlW9/YSeZBMu4HuD3B9xV9hkdK88vlbwH4ia/iB/4RvUZP8ASkA4tZj0cD+6e9MD0Simo6uoZGDKwyCDkEU6kByPjLwzdaheW2r6HcJbazagqpkGUmT+43t71kDU/G6fK/ha3kYcFkvlAP516IcUZFO41JrY87/tXxr/ANClF/4HpR/avjX/AKFKL/wPSvRMj2oz9KLj55dzzv8AtXxr/wBClF/4HpR/avjX/oUov/A9K9Ez9KM/Si4c8u553/avjX/oUov/AAPSj+1fGv8A0KUX/geleiZ+lGfpRcOeXc87/tXxr/0KUX/gelH9q+Nf+hSi/wDA9K9Ez9KM/Si4c8u553/avjX/AKFKL/wPSj+1fGg6+Eosf9f6V6Jn6Vk+Kor+40G8g0dokvpUKRvIxULnqcgHkDNFw55dzhdL8U+KNVgeaw8MwTxpI0TMt+vDKcEVc/tXxr/0KUX/AIHpWH8BdP1bT4dRM0kD6a8zRkByWWVDgkDHQj+Qr2DP0pvQFOXc87/tXxr/ANClF/4HpR/avjX/AKFKL/wPSvRM/SjP0pXDnl3PO/7V8a/9ClF/4HpR/avjX/oUov8AwPSvRM/SjP0ouHPLued/2r41/wChSi/8D0o/tXxr/wBClF/4HpXomfpRn6UXDnl3PO/7V8a/9ClF/wCB6Uf2r41/6FKL/wAD0r0TP0oz9KLhzy7nnf8AavjX/oUoh/2/pTBoHiHxVd2//CTxW+naTDIJDZwyeY87A5G5umPavR8iii4nJs4L4xAL4bsAOn9pW3/oVd8vQVwXxj/5Fuw/7CVt/wChV3o6Cuur/u1P1l+hy0/40/l+oUUUVxnSFFFFABRRRQBwXif/AJKv4N/65Xf/AKLp3jXVdSu9etfDWhTm0mlhNxdXQGWiizjC+5P9KZ4m/wCSreDf+uV3/wCi6ij5+L2p57aZEB/33XXi/wCHS/w/+3SOWj8VT1/REQ+HmnPzd6hq1zL/ABSPdsC35Uf8K50X/ntqf/gY9dnRXnczNbnGf8K50X/ntqf/AIGPR/wrnRf+e2p/+Bj12dFHM+4XOM/4Vzov/PbU/wDwMej/AIVzov8Az21P/wADHrs6KOZ9wucZ/wAK60X/AJ7an/4GPR/wrnRf+e2p/wDgY9dnUdxPHbW8k07rHFGpd3Y4CgDJNHM+4XOQ/wCFc6L/AM9tT/8AAx6P+FdaL/z21P8A8DHrfXxBpTWrXH26EQKyqXLYUFugzVxry3WYxGaMSLH5pBbon976Uc0gOTf4b6G6lXk1JlIwQbxyDQnw30SNAiSaiqqMAC7cAV0Ntr2l3Mc0kN9A0cK73fdgBfXPTFOtNb027SZre8hcQrvk+blV9fp70c0g1Od/4Vzov/PbU/8AwMej/hXOi/8APbU//Ax66Ox1rTr9itneQysq7yobnb649Kii8Q6TLbyzx30JhiXc754UZxnNHNIDB/4Vzov/AD21P/wMej/hXOi/89tT/wDAx66jT9Ss9SV2sbmKcIcNsbJU+/pVujmkFzjP+Fc6L/z21P8A8DHo/wCFdaL/AM9tT/8AAx67OijmfcLnGf8ACudF/wCe2p/+Bj0f8K50X/ntqf8A4GPXZ0Ucz7hc4z/hXWi/89t
T/wDAx6P+FdaMOk+pg/8AX49dnRRzPuFzgNRt9R8BomqafqV3eaRG6rdWd0/mFUJxuQ9eK9RgkWaFJIyCjqGUjuDXF/EIA+CtZz/z7Mf0rofCJLeFtHJ5JtIj/wCOCtIO6uy4mtRRRVFBRRRQAUUUUAFFFFABRRRQAUUUUAYHjTQBr+iPbo/lXkRE1rMODHKvIOaj8D6+dc0UNcp5Wo2zGC7iPBSReDx6HrXRmuA8TI3hTxRB4jgBGnXhW31JB0U/wy/h0NNAd/VbULKDULOa1u41lt5kKOjDIINTxusiKyMGVhkEHIIp1IDgfCd5P4Z1n/hFtWkZ7dstply//LRP+eZP94fyrvc1y/xG0+0vfC9zLdiZZLUedBLAhaSOQdCoHNcj8JfH2o+KNZurLVPKXybZWQIuCzA4Yn3OaYjb8Z6rqV94gtvDWh3H2OSSI3F3dgZaOPOAF9yapj4eaa/N3f6vcS/xSPeMCT+FSW/Pxc1jPbT4gP8Avquvpm0Ipq7OM/4Vzov/AD21P/wMej/hXOi/89tT/wDAx67OikacqOM/4Vzov/PbU/8AwMej/hXOi/8APbU//Ax67OigOVdjjP8AhXOi/wDPbU//AAMej/hXOi/89tT/APAx67OigOVdjjP+Fc6L/wA9tT/8DHo/4Vzov/PbU/8AwMeuxlkWKJ5JGCooLMx7AVQ0/WtN1B9lneQTPt37Vbkr649KYuWJzv8AwrnRf+e2p/8AgY9H/CudF/57an/4GPXQ2euabeeYba8hkEal2IbgKOpz6Ulvr+lzxTSxX0DRQrud92AB659KA5YnOR/DTQYlIifUUBOSFu2GTT/+Fc6L/wA9tT/8DHrorXXNNuo5pILyF1hXfJ83Kr6kelLY61p18XFpeQSsi72VW5C+uPSgOWJzn/CudF/57an/AOBj0f8ACudF/wCe2p/+Bj1uxeItJlt5bhL6EwxKGd88AE4HNW9P1Gz1GN2sbiOcIcNsbO0+9AcsTl/+Fc6L/wA9tT/8DHo/4Vzov/PbU/8AwMeuzopD5V2OM/4Vzov/AD21P/wMej/hXOi/89tT/wDAx67OigOVdjjP+Fc6L/z21P8A8DHo/wCFc6L/AM9tT/8AAx67OigOVdjjP+FdaN2n1QH1F49UtRg1LwGseqWGo3d7o6uqXVpdPvKITjch6jFegVzXxHAPgfVwf+eJP60yZQViH4vSLL4X06RDlG1G2YH1BavQB0FeX+PST8OfDhPU3Nl/SvUB0FdVX/dqfrL9Dz6f8afy/UKKKK4zpCiiigAooooA4LxN/wAlX8G/9crv/wBF1DF/yV7VP+wZF/6Eam8Tf8lX8G/9crv/ANF1A7CD4wXYk48/S0Mf+1hua68X/Dpf4f8A26RyUfiqev6I6+iiivMNgooooAKKKKACsvxVFJP4Z1aKFWeR7SVVVRksSpAAFalFAHnq2kmpeH4LOf8AtK7UXNuJEu7by9i9GA+UZHvVWTR9Xln1jTWilk8nTvs9tcMCFmXflV3f3tvymvTKKvnA4zVZX1jw6Lay068jltnhkkt5YSm5UYFkBPB4Hb0qvqiz61qE95ZWV1DBDps8DGaIxtI7fdQA8nGP1ru6KXMBwdlHPqU2htDYXVuNPt3E800RjLEx7dgB5PPPpwKybK2vH8A3dgBqr3Yt0AgmtSqoQ44Q7Rn9elepUU+cDlvClncWWr6r/aPmzXUvlst0Y9qSRgYC4AwCCTkV1NHPvRUt3dwCiiikAUUUUAFFFFAHPfEH/kStZ/69n/lW/wCD/wDkVNG/684v/QBXN/EqdLfwRqxkON8XlqPVmOAP1rqfDcD2vh7TIJBh4raNGHoQoFaw2KiaVFFFWWFFFFABRRRQAUUUUAFFFFABRRRQAVW1Kyg1Gxns7tA8E6GN1PcGrNFAHD+AL2fTbu68Lao5a5sRutZG/wCW1ufun6joa7iuO+IWk3EkFtrmkL/xNtLbzUA/5ax/xIfXIroPD+rW+t6
Pa6jaNmGdNwB6qe4PuDTYGjWRH4d0uHWhq0FpHDf7WRpIxt3g9dwHBrXopAed23/JXNa/68If/Qq6+uQBEHxe1BZDg3GnRsmf4sNg119Ub09gooopFhRRRQAUUUUAU9YVpNJvUjUs7QuAAMknBrjtMiudRbw0IrG6t/7Nh/0iWeIx/wDLPbsAPJ5/Diu9oppktXPM9Isb258I3enK2pm6+z4FtcW/lxjDZ2qxUZz05J61saxM+taDHHZafeKbWaGaS3lhMe9UYFkGeGOB244rtKOtFw5TgtWSfWb68vrKyu4YI9Mmtz5sRjaV25ChTycYP50+CK41S80ia3sbm3jsbWRZpZ4jGXJTaEAPJ55ruqKLi5TzC0tryXwJcWKnVZLlYYl8ie12LGQ65CHaC2Pqa6fwnZ3NlqmrDUPNlu5WRvtJj2pJGBhQMDAI5BH411FFFxqKQUUUUigooooAKKKKACub+I3/ACJOr/8AXA10lct8Tpkh8D6pvON8YjX3YkAU0KWzM3x5/wAk48N/9fFl/SvUB0FeZ/EWFrfwBoMMgw8d3ZqR6EYr0wdBXVV/3an6y/Q82n/Gn8v1CiiiuM6QooooAKKKKAOC8T/8lX8G/wDXK7/9F1seL/C8evrbzwXL2Op2h3W91GMlfUEdwfSsfxN/yVfwb/1yu/8A0XXefWuvFfBS/wAP/t0jmofFU9f0R58NM+IMI2pfeHZwOA80cqsfqF4pfsPxE/57eFv++Z67ObV9NgcpNqFnG44KvMoI/Wmf27pH/QUsP/AhP8a5OVdjosjj/sPxE/57eFv++Z6PsPxE/wCe3hb/AL5nrsP7d0j/AKClh/4EJ/jR/bukf9BSw/8AAhP8aOVdgsjj/sPxE/57eFv++Z6PsPxE/wCe3hb/AL5nrsP7d0j/AKClh/4EJ/jR/bukf9BSw/8AAhP8aOVdgsjj/sPxE/57eFv++Z6PsPxE/wCe3hb/AL5nrsP7d0j/AKClh/4EJ/jR/bukf9BSw/8AAhP8aOVdgsjj/sPxE/57eFv++Z6PsPxE/wCe3hb/AL5nrsP7d0j/AKClh/4EJ/jR/bukf9BSw/8AAhP8aOVdgsjg9Wfx5pWm3N9eXHhdYLdDI5Cz9B/Wk0aTx5q+l21/Z3Hhdre4QOhKz557H3qb4ttba54TuIbLXbGFIQZ5IxKrNNtGQgwfWm/B9rbRPCcEV7rljJHOBPHE0qq0G4ZKHJ9aOVW2FZXLP2H4if8APbwt/wB8z0fYfiJ/z28Lf98z12H9u6R/0FLD/wACE/xo/t3SP+gpYf8AgQn+NHKuw7I4/wCw/ET/AJ7eFv8Avmej7D8RP+e3hb/vmeuw/t3SP+gpYf8AgQn+NH9u6R/0FLD/AMCE/wAaOVdgsjj/ALD8RP8Ant4W/wC+Z6PsPxE/57eFv++Z67D+3dI/6Clh/wCBCf40f27pH/QUsP8AwIT/ABo5V2CyOP8AsPxE/wCe3hb/AL5no+w/ET/nt4W/75nrsP7d0j/oKWH/AIEJ/jR/bukf9BSw/wDAhP8AGjlXYLI4/wCw/ET/AJ7eFv8Avmej7D8Q/wDnv4XH/AZ67D+3dI/6Clh/4EJ/jR/bmknpqlj/AOBCf40WXYLI5G08G6tqmoW914w1OG6ht3EkdlaoVh3DoSTycV344FRwTxTxh4JUkQ9GRgw/SpKASCiiigYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAjDIwa8/sx/wAIb40ayb5dE1pzJB2WC47p9G7V6DWN4u0OLxDodxYyko7DfFIOscg5Vh+NMDZorl/AOuTarpkltqIEer2D/Z7uM8HcOjfQjmuopAcz4w8LJrv2e5trp7DVLUk293GMlc9VI7g1hrpnxBiGxL/w7Oo6PLHKrH8F4r0LIqjNq+mQuVm1CzjcdQ06gj9ad2O7Rxn2L4if89/C/wD3zPR9i+In/Pfwt/3zPXYf27pH/QUsP/AhP8aP7d0j/oKWH/gQn+NF2HM
+5x/2L4if89/C3/fM9H2L4if89/C3/fM9dh/bukf9BSw/8CE/xo/t3SP+gpYf+BCf40XYcz7nH/YviJ/z38Lf98z0fYviJ/z28Lf98z12H9u6R/0FLD/wIT/Gj+3dI/6Clh/4EJ/jRdhzPucf9i+In/Pfwt/3zPR9i+In/Pfwt/3zPXYf27pH/QUsP/AhP8aP7d0j/oKWH/gQn+NF2HM+5x/2L4if89vC3/fM9I1p8QlUs0/hYKBknbPxXY/27pH/AEFLD/wIT/GsnxTfWeqaDeWVhrun2s06eX5pmVtqnrxnrjNO7Dmfc47w3qnjTxHZy3OmXfhh445WhbKzZyp/keo+ta/2L4if89/C3/fM9cr8EbePRRf3E+t2S2skjQtbO6qSyHhxk9DzXrX9u6R/0FLD/wACE/xobYKb7nH/AGL4if8APfwt/wB8z0fYviJ/z28Lf98z12H9u6R/0FLD/wACE/xo/t3SP+gpYf8AgQn+NK7Dmfc4/wCxfET/AJ7+Fv8Avmej7F8RP+e3hb/vmeuw/t3SP+gpYf8AgQn+NH9u6R/0FLD/AMCE/wAaLsOZ9zj/ALF8RP8Ant4W/wC+Z6PsXxE/57+Fv++Z67D+3dI/6Clh/wCBCf40f27pH/QUsP8AwIT/ABouw5n3OP8AsXxE/wCe3hb/AL5no+xfET/nt4W/75nrsP7d0j/oKWH/AIEJ/jR/bukf9BSw/wDAhP8AGi7Dmfc4/wCw/ET/AJ+PDA+iz06z8G6pqWo2934w1KC7jt3EkVnaxlIt46Mc8nFdd/bmknpqlj/4EJ/jV2CaKeMSQSJIh6MjAg/iKLsG29zhvjGMeG7D/sJW3/oVd6OgrgvjH/yLdh/2Erb/ANCrvR0FddX/AHan6y/Q5af8afy/UKKKK4zpCiiigAooooA4LxP/AMlX8Hf9crv/ANF1F40nu9c8VW3hi2uZbSzWD7VeyRNtd1zgID2qXxN/yVfwb/1yu/8A0XUMf/JXtT/7BkX/AKFXXiv4dJ/3f/bpHLR+Kp6/oiSD4f8AhiJAp0mCQjjdISxP61L/AMIJ4X/6Alp/3yf8a6WivN5ma6nNf8IJ4X/6Alp/3yf8aP8AhBPC/wD0BLT/AL5P+NdLRRzMLs5r/hBPC/8A0BLT/vk/40f8IJ4X/wCgJaf98n/Guloo5mF2c1/wgnhf/oCWn/fJ/wAaP+EE8L/9AS0/75P+NdLUF/dxWNjcXdxkQwRtK+0ZOAMnijmYXZg/8IJ4X/6Alp/3yf8AGj/hBPC//QEtP++T/jT38W2EWmNf3UN7b2yui7poCud3QgdxWjLrNjFM8ckoULb/AGpnI+UR5xnNP3g1Mr/hA/C//QEtP++T/jR/wgfhf/oC2n/fJ/xqxB4ospLWe5khvLe2iQP5k1uyhwTgbfUnsKda+JrGcXAdLm3lgiM5inhKMyDqyg9RReQalb/hBPC//QEtP++T/jR/wgnhf/oCWn/fJ/xqzpviawv5ViVbmCR4zLGs8LRmRQMkrnrUEHi6xl0yfUPIvktIk8wyPAQHGcfL69aLyDUb/wAIJ4X/AOgJaf8AfJ/xo/4QTwv/ANAS0/75P+NaWja1a6sZlgE0csO3zIpoyjrkZBwex9a06G2guzmv+EE8L/8AQEtP++T/AI0f8IJ4X/6Alp/3yf8AGulopczC7Oa/4QTwv/0BLT/vk/40f8IJ4X/6Alp/3yf8a6WijmYXZzX/AAgnhf8A6Alp/wB8n/GkPgPwv/0BbT8j/jXTUUcz7hdnnniDQ18Fw/2/4ZaS3Fuym5tN5McseQDwehFep2k6XNrFPEcxyoHU+xGa474g8+CtZz/z7P8Ayrf8If8AIq6P/wBecX/oArWDbWpUTXoooqiwooooAKKKKACiiigAooooAKKKKACiiigAooooA4LxlDJ4c1628V2SMYDi31KNR96Mnh8eqmu4t5o7iBJoWDxyKGVlOQQelV9aiebSbyKK2S6d4mUQSNtWTI6
E9q8Z+Ed/rtp44Gh62bmGCG2kWG2kPyp8wPHr3wae6Edh4ymute8WQ+GLe5ktbFLf7VeyRHa8gJwEB7D1qWL4f+GI0C/2TDIR1aQlifxzUNtz8XNZ/wCwfD/Ouvpm9OKauzmv+EE8L/8AQEtP++T/AI0f8IJ4X/6Alp/3yf8AGulopF8qOa/4QTwv/wBAS0/75P8AjR/wgnhf/oCWn/fJ/wAa6WigOVHNf8IJ4X/6Alp/3yf8aP8AhBPC/wD0BLT/AL5P+NdLRQHKjmv+EE8L/wDQEtP++T/jR/wgnhf/AKAlp/3yf8a6C5mW3t5ZpM7I1LtgdhzWRpPiew1KWKNBcQPKnmRC4hMfmrjOVJ4NPUVkVv8AhBPC/wD0BLT/AL5P+NJ/wgnhf/oC2n/fJ/xqzp/ieyvbeW5WO7htI4zIZ5oSiFR6HvSQ+KbJ7Sa5mhvbeCNQwea3ZA+eBt9SfSjUNCv/AMIH4X/6Atp/3yf8aX/hBPC//QEtP++T/jVi38T2Msd0ZEuraS3iM7xXEJRyg/iAPUcU7TfEtjfSeUFuIJTGZUS4hMZdB1K560ahaJV/4QTwv/0BLT/vk/40f8IJ4X/6Alp/3yf8adD4usZNMm1D7PfpaRxrJve3K7wSANvr1FaOj6za6t5y24mjlhIEkU0ZjdcjI4PY0ahoZn/CCeF/+gJaf98n/Gj/AIQTwv8A9AS0/wC+T/jXS0Uh8qOa/wCEE8L/APQEtP8Avk/40f8ACCeF/wDoCWn/AHyf8a6WigOVHNf8IJ4X/wCgJaf98n/Gj/hBPC//AEBLT/vk/wCNdLRQHKjmT4D8Ln/mC2v4A/41heINFXwTGNf8MvLbxwOpurPeWjljJAPBPBFeh1zfxH/5EnV/+uBp3JlFWK3xamW48J6ZNHykmoWrqfYtmvQl6CvL/HnPw48Of9fFl/SvUB0FdVX/AHan6y/Q8+n/ABp/L9QooorjOkKKKKACiiigDgvE3/JV/Bv/AFyu/wD0XUMf/JXtU/7BkX/oVTeJ/wDkq3g3/rld/wDouq2oSrpvxaEl0fLi1DTxFC56M6vkr9cV1Yv+HT/w/wDt0jko/FU9f0R2NFFFeabBRRRQIKKKKACs7xHbS3nh/U7a3XdNNbSRoucZYqQBWjRQgOJh0q5uNItrU6fdxGO5geUXNwJQyj72PmOB7VSbwpqTXGrWOVNg1kYLKZm7FtwRu/HTPpivQ6KrmGclqUOp6xoYgbTWtbq2eKdQ8i7JXRgdoI7HHUiodQsdT1q8lu5bFrRYrGa3jjkkUtK7genAAx+tdnRRzAcXZ6dqd/JpH2qx+xw6bCwy8il5XMezAA6L36+lZlroeoN4KutL+wXkV20CpukuQ6MQwPyjccfpXo9FHMBznhnSrjStR1BZxJOs2x0u5H3OwAxsb6dvY10dFFJu4gooopAFFFFABRRRQBz3xB/5ErWf+vZ/5Vv+D/8AkVNG/wCvOL/0AVyvxOvY7bwjeQZ3XF2Bbwxjq7McYH612eh2psdGsbV+WggSM/UKBW0PhLiXqKKKosKKKKACiiigAooooAKKKKACiiigAooooAKKKKACq8lpBJcxXEkMbTxZ2SEfMuRg4NWKKAPO7b/krms/9g+H+ddfXHXjrpvxbd7o7I9RsFSFm4DOjcrn1xXY0zen8IUUUUFhRRRQAUUUUAVdViefS7uKMZeSJlUepINcnp2m6peHQku7I2cOlxfMzyBmlfy9mABnA78121FO4mrnnumaDeP4YudLksrq3ujBtEk1xviZg2QAu44/KtXVYtU1rSEjOmta3VrLFcIkkilJWRgSoIPTjqa62igXKcVqGnalrV1d3r2TWezT5baKJ5FLyO/04AGB+dPg07UtRutNkurL7FDp9u6gPIrPK7JtxgcAe9dlRRcOVHnVtol+3g240z7Bew3ZhjQtLch0YhhnYNxC9z0FdJ4Z0y40u+1GOcPMkrLIl3I+53GMbG/3e3sa6GigajYKKKK
QwooooAKKKKACub+I3/Ikav8A9cDXSVyPxRvEg8I3VtkG5vCtvDGPvOzEDgU0KWzKXjz/AJJv4b/6+LL+leoDoK82+JlubTwPols33ory0jP4HFekjoK6qv8Au1P1l+h5tP8AjT+X6hRRRXGdIUUUUAFFFFAHn/jxxYeM/COqTfLbpPJbOx6KZFwM11HiDQNP8RWH2XUod6K25HU7XRvVT2NS+IdGtNe0mewv03QyjGR1U9iPcVx1rN4z8MRi1axj8QWMfyxTRyiOYL2DA9a7lFYilGMWlKOmrtdXvu/U5daM5Nq6ZL/wgN/F8ln4t1aOIcKr7XI/Gj/hBtY/6HHUv+/a0v8AwmuvD73gjVc+0imj/hN9d/6EjV/++1qfqNbsvvj/AJj+s0vP7n/kJ/wg2sf9DjqX/ftaP+EG1j/ocdS/79rS/wDCb67/ANCRq/8A32tH/Cb67/0JGr/99rR9Qrdl98f8xfWKXn9z/wAhP+EG1j/ocdS/79rR/wAINrH/AEOOpf8AftaX/hN9d/6EjV/++1o/4TfXf+hI1f8A77Wj6hW7L74/5h9Ypef3P/IT/hBtY/6HHUv+/a0f8INrH/Q46l/37Wl/4TfXf+hI1f8A77Wj/hN9d/6EjV/++1o+oVuy++P+YfWKXn9z/wAhP+EG1j/ocdS/79rR/wAINrH/AEOOpf8AftaX/hN9d/6EjV/++1o/4TfXf+hI1f8A77Wj6hW7L74/5h9Ypef3P/IyvE3h7UtB0G91K48Y6iUt4ywXYo3N2H4nAqPwloWo+I9BtNStvGOogTL8yhFOxu6/gapfEDU9V8TaDJbXvhPW7WGLM25ZE25AOC3HQdarfDLUtR8O6Cg07wvrV5FchZWfzEKM2MblGOAf6UvqNXbS/qv8x/WKdr6/c/8AI67/AIQbWP8AocdS/wC/a0f8INrH/Q46l/37Wl/4TfXf+hI1f/vtaP8AhN9d/wChI1f/AL7Wn9Qrdl98f8xfWKXn9z/yE/4QbWP+hx1L/v2tH/CDax/0OOpf9+1pf+E313/oSNX/AO+1o/4TfXf+hI1f/vtaPqFbsvvj/mH1il5/c/8AIT/hBtY/6HHUv+/a0f8ACDax/wBDjqX/AH7Wl/4TfXf+hI1f/vtaP+E313/oSNX/AO+1o+oVuy++P+YfWKXn9z/yE/4QbWP+hx1L/v2tH/CDax/0OOpf9+1pf+E313/oSNX/AO+1o/4TfXf+hI1f/vtaPqFbsvvj/mH1il5/c/8AIT/hBtY/6HHUv+/a0f8ACC6wevjHU/wjWl/4TfXf+hI1f/vtaP8AhN9d/wChI1f/AL7Wj6hW7L74/wCYfWKXn9z/AMi/oXgOx0/UU1G+urvVL9PuS3cm4R/7q9BXX1wP/Cb67/0JGr/99rR/wm+u/wDQkav/AN9rR9QreX3x/wAx/Wqfn9z/AMjvqK4H/hN9d/6EjV/++1o/4TfXf+hI1f8A77Wj6hW8v/Ao/wCY/rVPz+5/5HfUVwP/AAm+u/8AQkav/wB9rR/wm+u/9CRq/wD32tH1Gt5f+BR/zD61T8/uf+R31FcD/wAJvrv/AEJGr/8Afa0f8Jvrv/Qkav8A99rR9QreX/gUf8w+tU/P7n/kd9RXA/8ACb67/wBCRq//AH2tH/Cb67/0JGr/APfa0fUK3l/4FH/MPrVPz+5/5HfUVwP/AAm+u/8AQkav/wB9rR/wm+u/9CRq/wD32tH1Ct5f+BR/zD61T8/uf+R31FcD/wAJvrv/AEJGr/8Afa0f8Jvrv/Qkav8A99rR9QreX/gUf8w+tU/P7n/kd9RXA/8ACb67/wBCRq//AH2tH/Cb67/0JGr/APfa0fUa3l/4FH/MPrVPz+5/5HfUVwP/AAm+u/8AQkav/wB9rR/wm+u/9CRq/wD32tH1Gt5f+BR/zD61T8/uf+R31FcD/wAJvrv/AEJGr/8Afa0f8Jvrv/Qkav8A99rR9QreX/gUf8w+tU/P7n/kdP4k8Pa
f4isfs2pxF1U7kdTteNvVT2Ncv/wgOoRfLaeLtWjiH3VkxIQPqaX/AITbXf8AoSNX/wC+1o/4TbXf+hI1f/vtaPqNby/8Cj/mH1qn5/c/8hP+EG1j/ocdS/79rR/wg2sf9DjqX/ftaX/hNtd/6EjV/wDvtaP+E213/oSNX/77Wn9RreX/AIFH/MPrcPP7n/kJ/wAINrH/AEOOpf8AftaP+EG1j/ocdS/79rS/8Jtrv/Qkav8A99rR/wAJtrv/AEJGr/8Afa0fUa3l/wCBR/zD63Dz+5/5Cf8ACDax/wBDjqX/AH7Wj/hBtY/6HHUv+/a0v/Cba7/0JGr/APfa0f8ACba7/wBCRq//AH2tH1Gt5f8AgUf8w+tw8/uf+Qn/AAg2sf8AQ46l/wB+1o/4QbWP+hx1L/v2tL/wm2u/9CRq/wD32tH/AAm2u/8AQkav/wB9rR9RreX/AIFH/MPrcPP7n/kJ/wAINrH/AEOOpf8AftaP+EG1j/ocdS/79rS/8Jtrv/Qkav8A99rR/wAJtrv/AEJGr/8Afa0fUa3l/wCBR/zD63Dz+5/5Cf8ACDax/wBDjqX/AH7Wj/hBtY/6HHUv+/a0v/Cba7/0JGr/APfa0f8ACba7/wBCRq//AH2tH1Gt5f8AgUf8w+tw8/uf+Qn/AAg2sf8AQ46l/wB+1o/4QbWP+hx1L/v2tL/wm2u/9CRq/wD32tH/AAm2u/8AQkav/wB9rR9Rr+X/AIFH/MPrcPP7n/kJ/wAINrH/AEOOpf8AftaP+EG1j/ocdS/79rS/8Jtrv/Qkav8A99rR/wAJtrv/AEJGr/8Afa0fUa3l/wCBR/zD63Dz+5/5Cf8ACDax/wBDjqX/AH7Wj/hBtY/6HHUv+/a0v/Cba7/0JGr/APfa0f8ACba7/wBCRq//AH2tH1Gt5f8AgUf8w+tw8/uf+Qn/AAg2sf8AQ46l/wB+1o/4QbWP+hx1L/v2tL/wm2u/9CRq/wD32tH/AAm2u/8AQkav/wB9rR9RreX/AIFH/MPrcPP7n/kJ/wAINq/fxjqZHtGorQ0LwLY6bqCajeXV3qmop9ya7k3CP/dXoKof8Jtrv/Qkat/32tDeKfFl4pjsPB80EjcCS7uFVF9yByaX1Kr1t/4FH/MPrUH3+5jfiewvbnw7o0PzXFzqEcxUdo4+WNegDoK47wp4YurbU5da8QXK3msyrsBQYjgT+6g/rXYjpSxM4JRpQd1Hr5vew6MZXlUkrXCiiiuQ3CiiigAooooAKTFLRQAmOaKWmSSLGheRgqjkkngUAPxRisRvEunhiEaWQA4ykZIo/wCElsf7tx/36NTzIj2ke5t4oxWJ/wAJLY/3bj/v0aP+Elsf7tx/36NHMu4e0j3NvFGKxP8AhJbH+7cf9+jR/wAJLY/3bj/v0aOZB7SPc28UYrE/4SWx/u3H/fo0f8JLY/3bj/v0aOZdw9pHuW9eTfouoLj71vIP/HTWL8L23+A9GP8A0wA/Imrc3iHT5oXjdbja6lT+6PQ1V0TUtJ0fTILCzS6FvCu1A0ZJ60c0e4c8e51OKMVif8JLY/3bj/v0aP8AhJbH+7cf9+jRzLuHtI9zbxRisT/hJbH+7cf9+jR/wktj/duP+/Ro5l3D2ke5t4oxWJ/wktj/AHbj/v0aP+Elsf7tx/36NHMg9pHubeKMVif8JLY/3bj/AL9GhfEunkgO0sYPd4yBRzIPaR7m1S4pkUiyxh42DIwyCDkGn1RYYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYoxRRQAYpMUtFACYpaKKACiiigAoopGYKpJ4AGaAFoqnHqVrJJAiTIWnUvGP7w9qbNqtnAu6WYAbzHwCfmHUUXFdF6iqsF/bztGIpVcyKWTHcDr/ADqCfWb
GBS0k4ADFCdpOCDgilcLo0TWF4r+e3soD9y4uo439wf8A9Va9tcR3MIlhbcjdDjFZHif7+k/9f0f9aUtianwmzFCkUapGiqqjAAHSn7R6CjNQR3UclzNAM74gC2Rxz0p3RWhPtHoKNo9BSI6uMqQR6g06mMTaPQUbR6ClpsjqgyxAHuaT0FoLtHoKNo9BSBwehHTNRmcCRUwSGBO4dBii6DQl2j0FG0egpqSK/wB1g3rg0+mGgm0ego2j0FBbHWqaahH9naaYGGPfsBfvzgGk2luDsi5tHoKNo9BTVkB6EHvSNKqrksPbnrRdWuGg/aPQUbR6CqlnfLdRwuiOFlTeCRwPb61YEqnJDqQOvPShNPVBdbj9o9BTJokkjZHRWVhggjrTXuI0jZ2kUKgyxz0p0ciyoGQgqeQRRdbBoYvhX93FfW4z5cF06IPQccVvVheGf9dq3/X6/wDSt2lHYmn8IUUUVRYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABUc43QuBySpH6VJRQByVvpU81vYqyPFNDa/I5H3JAwIH+e1EFpf3CWjNG9tMbuWRyVDbAQea63FGKnlRPIjn/sZ0q5s3ijmniVJFdlGWDMQckfhURsrg+H7tTC4lmufNWPHzBS4P8hXS4oxT5UHKhAMCsPxP9/Sf+v6P+tbtYXif7+k/wDX9H/WlLYmp8JuVy+qRzS3GrC2yT+53ADOV7jHfiuoxkVXtLOC03iBNu87mJOST9TUVIc9kVKPMZvh2ERvcOkmVfb8ghMSgjuAfw/Ktukx0pauEeWNgirKwVkeIoUlhhLsQyPuUGIyK3HQgVr0honHmjYbV1Y5yznaG4tp57Z4Ea3KBEjJAIbpgDjPXmobOB3t7QTxzRobacOQpyuWGPxrqMUYrL2N92R7PzMTw6PLkmiWFfLULiZYTFv68EHuPWt2m4Ap1aQjyqxUVZWMvxFHPLpUy2wJfjIAySuRnjvxmsKSyVrK4dR5yeZCxiW2ZANrDJAPU49K7Gm7aidJTd2KUOZ3OeWRbe9u2jil8ma3QQhIjgkbuOnHUdaq20Kqbc6jbyOgtESMGMttb+IYxweldXgUYpex8xcnmcnZpcW1vauttKxjsGXYyn72RwfeoWglaPUBDGdklqAPKt2iUtk9j1NdlilxU/V+l/62F7Lpc5u/05UkkjtrcBZLN1YKnDNxjPv1rW0byv7PhEEflqFAK7NmD34xV3FLitI0lF3RShZ3MPw1/rtW/wCv1/6Vu1heGv8AXat/1+v/AErdq47Cp/CFFFFUaBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFZmvWMl7ZqICBPE4ljJ6bhWnRik1dCaurMwE1u5iULc6VeeYOD5ahl/A07+33/6BWo/9+v8A69buKTFKz7k8su5h/wBvv/0CtR/79f8A16P7ff8A6BWo/wDfr/69bmKMUWl3Dll3MP8At9/+gVqP/fr/AOvR/b7/APQK1H/v1/8AXrcxRiiz7hyy7mH/AG+//QK1H/v1/wDXo/t9/wDoFaj/AN+v/r1uYoxRZ9w5ZdzD/t9/+gVqP/fr/wCvR/b7/wDQK1H/AL9f/XrcxRii0u4csu5h/wBvv/0CtR/79f8A16P7ff8A6BWo/wDfr/69bmKMUWl3Dll3MP8At9/+gVqP/fr/AOvR/b7/APQK1H/v1/8AXrcxRiiz7hyy7mH/AG+//QK1H/v1/wDXo/t9/wDoFaj/AN+v/r1uYoxRaXcOWXcw/wC33/6BWo/9+v8A69NbXLiRStvpN75h6eYu1fxNb2KMUWf
cOWXczNBsZLK1f7QQbiaQyyY6AmtSjFFNaFJWVkFFFFMYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAf/Z
">
<center>Figure 1: High level view of a <a href="https://jalammar.github.io/illustrated-transformer/">transformer</a></center>
1. The **encoders** are all identical in structure (yet they do not share weights). Each one is broken down into two sub-layers:
- **Self-Attention Layer**
- Say the following sentence is an input sentence we want to translate:
**The animal didn't cross the street because it was too tired.**
What does **"it"** in this sentence refer to? Is it referring to the **street** or to the **animal**? It's a simple question to a human, but not as simple to an algorithm. When such data is fed into a transformer model, the model processes the word **"it"** and the **self-attention layer** allows the model to associate **"it"** with **"animal"**. As each word in the input sequence is processed, **self-attention** looks at other words in the input sequence for clues that can lead to a better encoding for this word.
- **Feed Forward Layer**
- The outputs of the self-attention layer are fed to a feed-forward neural network. The exact same feed-forward network is independently applied to each position.
2. The **decoder** has both those layers (**self-attention** & **feed-forward layer**), but between them is an **attention layer** (sometimes called **encoder-decoder attention**) that helps the decoder focus on relevant parts of the input sentence.
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA28AAAEXCAYAAAA6IXx5AAAYJ2lDQ1BJQ0MgUHJvZmlsZQAAWIWVeQdUFE2zds/OBliWJeeck+QMknPOGYEl55xRiSJBRRBQBFRQQVDBQBIxIYgoIqiAAZGgZBUUUATkDkHf7773P/89t8+ZmWerq6uf7qruntoBgI2ZFB4ejKIGICQ0OtLaQJvb0cmZGzcOMIAckAEVQE/yigrXsrQ0BUj58/zvZWUQQFvPl+Jbtv5n/f+30Hj7RHkBAFki2NM7yisEwQ0AoFm9wiOjAcD0IXK+uOjwLbyEYPpIhCAAWLIt7LeD2bew5w6W2taxtdZBsC4AZAQSKdIPAOKWfe5YLz/EDjEcqaMN9Q4IRVQzEKzu5U/yBoC1A9HZExIStoUXECzs+R92/P6bTc+/Nkkkv794ZyzbhUw3ICo8mJTwf5yO/72EBMf86YMXuQj+kYbWW2NG5u1SUJjJFiYguC3U09wCwbQIfhzgva2/hd/6xxja7erPe0XpIHMGGAFAAW+SrgmCkblEMcYE2WntYhlS5HZbRB9lHhBtZLuLPSPDrHfto2JDg81Nd+1k+fsY/cFnfKL0bP7o+AboGyEYiTRUQ6K/rcMOT1RHbIC9OYKJCO6LCrIx2W07kuivY/5HJzLGeoszP4KXfCP1rXd0YOaQqD/jgiW8SNt9MSNYM9rf1nCnLezoE+Vo+oeDt4+u3g4H2Nsn1G6XG4xEl7b1btvM8GDLXX34jE+wgfXOPMPXomJt/rR9EY0E2M48wOOBJGPLHf7wSni0pe0ONzQamAIdoAu4QQxyeYIwEAgCeueb55FfOzX6gAQigR/wAeK7kj8tHLZrQpG7DUgEnxHkA6L+ttPervUBsYh846905y4OfLdrY7dbBIFJBIegWdHqaFW0KXLXRC4ZtBJa+U87bqo/vWL1sLpYQ6w+VuQvDy+EdTByRYKA/4fMBHn6IKPb4hL6Zwz/2MNMYvox45gBzCjmDbAHH7et7Gq5B6RF/os5NzADo4g1/d3ReSI2Z/7ooAUR1vJobbQawh/hjmZEswJxtBwyEi20BjI2eUT6nwxj/nL7Zy7/3d8W6/8cz66cKEqU32Xh+dczOn+1/m1F5z/myBt5mvxbE86Cb8Jd8AO4G26DmwE3fA9ugXvgO1v4byR83I6EP71Zb3MLQuwE/NGRuiw1I7X+P3on7TKI3PY3iPaJj95aEDph4QmRAX7+0dxayI7sw20U6iWxh1tGSloJgK39fWf7+G69vW9DjM//kflMA7AXiXHyvn9kgScAqO0EgCnnH5mgCwAsewC4/sIrJjJ2R4beumEAHlAhK4MFcAI+IIyMSQYoAFWgCfSAMbAAtsAJuCGz7g9CENZxYD9IBZkgFxwHReA0OAvOg0vgKrgBmkEbeAAegaegDwyAd0hsfAJzYAGsgDUIgnAQJUQHsUBckAAkBslASpA6pAeZQtaQE+QB+UGhUAy0H0qHcqEC6DRUAdVA16Fb0AOoG+qH3kBj0Az0DfqFglEEFD2KAyWIkkQpobRQJihb1D6UHyoClYjKQB1DnUJVoq6gmlAPUE9RA6hR1BxqGQYwBcwI88DisBKsA1vAzrAvHAkfhHPgYrgSroNbEV+/hEfheXgVjUXTobnR4kh8GqLt0F7oCPRB9BH0afQldBO6A/0SPYZeQP/GUGLYMWIYFYwRxhHjh4nDZGKKMVWYRkwnsnY+YVawWCwjVgiriKxNJ2wgNgl7BFuOrcfex/ZjJ7DLOByOBSeGU8NZ4Ei4aFwmrgR3BXcP9wL3CfeTjIKMi0yGTJ/MmSyULI2smKyW7C7ZC7IpsjVyanIBchVyC3Jv8gTyPPIL5K3kz8k/ka/hafBCeDW8LT4Qn4o/ha/Dd+KH8d8pKCh4KZQprCgCKFIoTlFco3hMMUaxSqAliBJ0CK6EGMIxQjXhPuEN4TslJaUgpSalM2U0
5THKGsqHlCOUP4l0RAmiEdGbmEwsJTYRXxC/UJFTCVBpUblRJVIVU92kek41T01OLUitQ02iPkhdSn2Leoh6mYaORprGgiaE5ghNLU03zTQtjlaQVo/WmzaD9jztQ9oJOpiOj06Hzosune4CXSfdJ3osvRC9EX0gfS79Vfpe+gUGWgY5BnuGeIZShjsMo4wwoyCjEWMwYx7jDcZBxl9MHExaTD5M2Ux1TC+YfjCzMWsy+zDnMNczDzD/YuFm0WMJYslnaWZ5z4pmFWW1Yo1jPcPayTrPRs+myubFlsN2g+0tO4pdlN2aPYn9PHsP+zIHJ4cBRzhHCcdDjnlORk5NzkDOQs67nDNcdFzqXAFchVz3uGa5Gbi1uIO5T3F3cC/wsPMY8sTwVPD08qzxCvHa8abx1vO+58PzKfH58hXytfMt8HPxm/Hv57/M/1aAXEBJwF/gpECXwA9BIUEHwcOCzYLTQsxCRkKJQpeFhoUphTWEI4QrhV+JYEWURIJEykX6RFGi8qL+oqWiz8VQYgpiAWLlYv17MHuU94TuqdwzJE4Q1xKPFb8sPibBKGEqkSbRLPFFkl/SWTJfskvyt5S8VLDUBal30rTSxtJp0q3S32REZbxkSmVeyVLK6ssmy7bILsqJyfnInZF7LU8nbyZ/WL5dfkNBUSFSoU5hRpFf0UOxTHFIiV7JUumI0mNljLK2crJym/KqioJKtMoNla+q4qpBqrWq03uF9vrsvbB3Qo1XjaRWoTaqzq3uoX5OfVSDR4OkUakxrsmn6a1ZpTmlJaIVqHVF64u2lHakdqP2Dx0VnQM693VhXQPdHN1ePVo9O73TeiP6vPp++pf1FwzkDZIM7htiDE0M8w2HjDiMvIxqjBaMFY0PGHeYEExsTE6bjJuKmkaatpqhzIzNTpgNmwuYh5o3WwALI4sTFu8thSwjLG9bYa0srUqtJq2lrfdbd9nQ2bjb1Nqs2Grb5tm+sxO2i7Frt6eyd7Wvsf/hoOtQ4DDqKOl4wPGpE6tTgFOLM87Z3rnKedlFz6XI5ZOrvGum6+A+oX3x+7rdWN2C3e64U7mT3G96YDwcPGo91kkWpErSsqeRZ5nngpeO10mvOW9N70LvGR81nwKfKV813wLfaT81vxN+M/4a/sX+8wE6AacDFgMNA88G/giyCKoO2gx2CK4PIQvxCLkVShsaFNoRxhkWH9YfLhaeGT4aoRJRFLEQaRJZFQVF7YtqiaZHXnV6YoRjDsWMxarHlsb+jLOPuxlPEx8a35MgmpCdMJWon3gxCZ3kldS+n2d/6v6xA1oHKg5CBz0PtifzJWckf0oxSLmUik8NSn2WJpVWkLaU7pDemsGRkZIxccjg0OVMYmZk5tBh1cNns9BZAVm92bLZJdm/c7xznuRK5Rbnrh/xOvLkqPTRU0c3j/ke681TyDtzHHs89Phgvkb+pQKagsSCiRNmJ5oKuQtzCpeK3Iu6i+WKz57En4w5OXrK9FRLCX/J8ZL10/6nB0q1S+vL2Muyy36Ue5e/OKN5pu4sx9ncs7/OBZx7XWFQ0VQpWFl8Hns+9vzkBfsLXReVLtZUsVblVm1Uh1aPXrK+1FGjWFNTy16bdxl1OebyzBXXK31Xda+21InXVdQz1udeA9dirs1e97g+eMPkRvtNpZt1DQINZY10jTlNUFNC00Kzf/Noi1NL/y3jW+2tqq2NtyVuV7fxtJXeYbiTdxd/N+Pu5r3Ee8v3w+/PP/B7MNHu3v7uoePDVx1WHb2dJp2PH+k/etil1XXvsdrjtm6V7ltPlJ40P1V42tQj39P4TP5ZY69Cb9Nzxectfcp9rf17++++0Hjx4KXuy0evjF49HTAf6B+0G3w95Do0+tr79fSb4DeLb2Pfrr1LGcYM57ynfl88wj5S+UHkQ/2owuidMd2xnnGb8XcTXhNzH6M+rn/KmKScLJ7imqqZlplum9Gf6Zt1mf00Fz63Np/5meZz2RfhLw1fNb/2LDgufFqMXNz8
duQ7y/fqJbml9mXL5ZGVkJW1Hzk/WX5eWlVa7frl8GtqLW4dt35qQ2Sj9bfJ7+HNkM3NcFIkaftVAEYulK8vAN+qAaB0AoAOyePwxJ38a7fA0FbaAYA9pIfSgpXQzBg8lgwnReZEno6/R8BSkojN1HiaYNon9PIMZUyAOYill02B/TjHHJcmdx5PPx+eX1nASTBIKETYVURblEN0UezRnhLxIAk1SUrJD1L10ikyVrI8sp/lbskfUrBSZFf8pFSnHK+ipYpXfbm3TM1bfY/6N41mzf1a2toE7Q86d3Vr9cr18w0OGpKMNIyZjRdNekzrzMrNKyzaLCesMTYstqx21Paw/brDmhNwJnchulLuQ+9bdht37/O4T7rpWeVV4p3jk+Dr52frrx0gFygaxBPMEkIVCocuhY2H90XcjrwQdSw6OSYztjEeneCTeH8/OCB4UCXZKMUlNSbtWHpRRtIhuUMTmXmHLbMEsilyQC7qCM1R4WPqeebHHfKdC5xPOBbaF9kWW500P2VSYnBau1S9TLlc9oz4WdFzUhUmlennRy8aVV2pnquhqRW4LH1F9apunVm9wzX36/43wm/GNRxsTGs61JzVknsrr7Xodllb1Z2Gu533hu6PPhhsr3/o28Hc8biz+FFcl+/jfd0OT6yemvQYPDPstX0e0Xeu/81LileSAzqDRkN6r5XeCLwlvl19Nz38+v2DkfMf0kf9xuzGzSfMPlp8spg0nlKeZpoencmZlZsdnbs0n/jZ8AvZl5qvBl8nFs4vxn9z+26xZLYcuNL+8/Cv5g3dzc1d/0vDaHgGPYqZwC6QweQKeH+KMsIoUZQqjvoRLQtdAv0rRhmmNOb3rPJsmex9nKxcjtz5PG28w3zL/CsCs4LPhM4LR4qoi5KJvhI7uydQXF78t8QjyWNSDtJc0lMydbKxcmrykHynQo6ihRKd0qByiYqLKofqMBIFruos6kMaJzVdtAS11rQHdK7rHtHz0d9rQGMwadhmVGQca+Jj6mnmbx5mEWLpaWVhrWojastmR7RH2a84TDkOOj10rnMpdc3Zl+gW4O7ooUuS9GT2grxmvQd8Onwb/ar8iwMyAsOCnII1Q4RCKZFIGAsfiViK4ol2jymJfRD3On4iYT5xdT/FAc6DwsncKdiUD6mNaXnpkRluh+wyHQ8HZKVnl+dczW080nS04dj1vKvHa/IvFpw7UVpYVJRXnH0y7VRCSdhpv9KAspTye2dFzl2qFDpfcOHlxdVq4iXWGr5aUSQOFK+q1+nWm11zuh58I/Pm+Ya7jf1NI83TLd9b4dtMbWJ3VO9q3lO8z/MA9WC8vethY0d1Z+mj412HHid2Rz6Jfprd09bL+PxA3/sXrC81XtkO+A6mDF18/fzN0jvaYfH3piPhH06O3h57MT4yMf5xbhKDeD91pn+OZl7qs/wXwa9UX38uTC4OfXvy/dZSxXLyiv0PoR8rP9tWE3+prhHWdTdmdv0vAc2hymE3tAgGh1nEzuBmycbJFynwBAFKLaIzVSr1FZp+2k16AQY9xkCmQ8xnWRpYO9kesz/iuM1ZwRXPrc39i+cCrwnvHF8WvxB/u4CbwKpgoZCU0BNhPxGcSLWooeiUWOYe4T2d4l4SQKJccq/ka6kY5O2mXsZUZlo2XY5TrkXeWn5e4ZAil2Iz8tYyrZyswqhyWVVL9cVer71f1JLUceqlGnIag5qJWpxaLdoW2m90/HU2dSv1LPXJ9R8a7DeUM5w1qjR2NWE2GTQtMrMxpzLvtki3VLVcsqq3DrIRsvloW2G3z57F/pVDnqOh46ZTo3OwC7/Le9fifeb7VtwK3QXcGzy0PN6S4j15PV8j+4i/j4Gvop+yv1EAKTAkiBSsEUIdMhx6MSwkXD58PeJhZE6UZTRD9LuYs7HecYJxk/FnEvQShhODk+iTXu6/feDuwY7khym3UmvSitPTM8IOuWTqHRbNwmS9yi7Jcc7lz107Mnr02bFbeeeOH8x3KVA5
wXpitXCw6EbxyZNHTxWUVJy+Wfqo7HX57Jm1c5QV3JWy5w0vuF4MqzpYnX3pSE1KLemy4hXilW9XP9etXiNc57whc9OyIamxoelni/Kt8NaS29faWu7cvtt9b/mBQfutDpvO5a7ibtknr3qO9nr0Gb3QeqU9GPyGODw33ju7vLS65f+d/+G2ClYBgBOpSIaaCYCdBgD5HUieOYDknXgALCkBsFUGKEFfgCL0AEhl7O/5ASGnDRZQABrADLiAEJACKkhubAGcgS+SE6eCPHAG1IG74DkYA0tI5sgOSUMGkDsUB+VDV6DH0CQKixJGmaKiUOVInreJ5HWx8C34N9oAfQI9jpHFZGE+YFWwJdg1JMN6QqZIVk3ORp6Pp8BnU+ApjhNYCdWUcpRtRDViK5US1W1qQ+p3NNG01LRX6XTp+ult6fsZLBheMLoz/mQqYVZjHmE5wMrG2srmxk7O3sYRyynH+Z3rBnckjzzPOm8XXzG/v8BeQaLgqNBN4SwRT1EtMcE9xD1r4l8kPkoOSDVKJ8lIy4zIZsnJy32Vb1EoUExQ8lY2VZFSZdpLVJNQL9UU0zqq3a3zVY9Mn8GAxZDdiN9YzsTcNMLslHmHxTcrPmsHm2O2XfZoB13HTKceF0ZXz321bh89sCQaT6znstcn72GfWT8qf5OAosCp4L0hhaFfwo0jaqMI0RExb+P041sSxZOqDnAfLE1hTM1Px2ekHlo+HJg1l5N7JORYYz7NCdbCz8U1p9xPM5b2lR89a3BuuTLvAv3FrKqVS0E13y4fv6pXT3Nt8cZkw3TTXMtU60Tb4j2mBzoP3To9umy6NZ5KPhN5rtAf+vLnEPot+fDZD3Rjdz8Rp/fPaX2u/7r2TWFJfwX/4+jPJ6vTvz6tvVlv2Dj+23NTanv/2PI/DhAALWABPEAUyAI1YAhsgQcIAUkgG5SAGnALPAXvwQKEgVghqW3vJ0CF0DWoF/qMokLJopxR6agbqE8wF+wOX4Dn0QroDPQARgSTihlGfF+KAzh/3ACZHlkLuSR5LV4Ef4VCjuIewZIwQRlPJCcWUfFQXUPy13c0cbSMtM109nSf6Q8w4BlOMYozPmEKY2Zivs8SwErPep8tjJ2ffZijhNORi5nrDXc5jzevFB/ge8V/WSBD0FVIDsnlZkV6RG8ip1ieeLrEfsloKS9pTRmCTK9sjpyJPJP8osIbxS6lJuVKlSOqiXtj1bLVWzR+aMlqe+vk6lbpNenfNrhteMeo23jMFGUmam5vcciy2Wreht/W3a7cfsSR1ynQuckVt8/B7bR7p0c/qd2zxivLO8DH2tfQz8k/LeB+EGWwZ0hbGGt4YsT7KO3omliquPD4p4k8SbH7+w7KJ19IZUsrzMAfSsqczyJlj+cmHpXKQx1/X3C9MLZY7uS3kuulMeUqZ36dq6qUOV9+YapKqNr/0rVapstlV9XqPl8ruaF8s7eR1LTWUtlq1Qbu1Nwzvb/YfrbD85HKY54n6KfPnsU+x/blvCC8rBxwHzJ7E/yu+v3UKNe45cfUybszTHPHvwguPPteuHJk1WhNZv3Mxsffi7v+RwNyQI2sfh4gBhSADrAEbojvDyArvwI0gMdgBFn3BEgQ0oT2QUlQKXQHGkORI14noYpQfTAD7APfQbOjU9CzGCfMM6wO9g5ODfeAzJTsPXkUngp/jcKeABOaKSOI0sSfVJ3UJTQxtE50RvTGDFaMxkyKzCIs8qzubAns0RyenLZc5txmPGa8pnxm/NYC7oJRQkeFa0Uei87soRRXlPCVPC01KMMq6y1XL7+maKn0TCV7r5M6RuO45rq2iU464sFm/TaDu4a9RmsmJqZN5hIWV6wkrJtsdewGHUKc8M5XXO3daDwoPN29XXw++qn65wZMBlkH94Sahb2IcImcjk6K5YwbSXiUdP9AebJdyq+0igz7TK7DC9l3co8c9c0zyGcpeFroW7RyMr2E5nRlmUL5s7O+FVBl2QWliwPVMTVstY+v
JNcZXJO8od+Q3FTZktfq1MZ0Z+he6QOnh7iOi4/kum536z0Z6onvleyD+xdeTg/0D+W/EXpb/u73e72RnA9Px6jG7SbOfZyZlJ4Kmj4383h2dh7zmf2L1FfdBYdF0jfv75ZLvEvLy0dX2Fdqfyj/OP1j9afDz6ZVxtXI1abVtV+avzJ+da8R12zWTq71rZOta67Hr19fn9ng2XDaKNh4srHxW/q39++Tv5/+/r0pvemzeWqzZ8v/Ub6yMtvHB0TQBgAzsrn5XRAAXAEAG/mbm2uVm5sb55FkYxiA+8E733a2zxpqAMq2vvGApy2/Uv79jeW/AP1xyAFVitPKAAABnWlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDxleGlmOlBpeGVsWERpbWVuc2lvbj44Nzk8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+Mjc5PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+Csfa0rMAAEAASURBVHgB7J0HfBzVtf+Peu+y1S3JlmW54W5MbIoLrmBsisGFGvLoCZCQ8gKP8JL/S3h5CSEQICF0jGnGFDcMtnED914ky13Fsq1m9a7/PXd1Z2elXRV7d7S7+h1/VnNn5s4t31nf2TPn3HM9moUQBARAAARAAARAAARAAARAAARAwKkJeDp169A4EAABEAABEAABEAABEAABEAABSQDKG74IIAACIAACIAACIAACIAACIOACBKC8ucBNQhNBAARAAARAAARAAARAAARAAMobvgMgAAIgAAIgAAIgAAIgAAIg4AIEoLy5wE1CE0EABEAABEAABEAABEAABEAAyhu+AyAAAiAAAiAAAiAAAiAAAiDgAgSgvLnATUITQQAEQAAEQAAEQAAEQAAEQADKG74DIAACIAACIAACIAACIAACIOACBKC8ucBNQhNBAARAAARAAARAAARAAARAAMobvgMgAAIgAAIgAAIgAAIgAAIg4AIEoLy5wE1CE0EABEAABEAABEAABEAABEAAyhu+AyAAAiAAAiAAAiAAAiAAAiDgAgSgvLnATUITQQAEQAAEQAAEQAAEQAAEQADKG74DIAACIAACIAACIAACIAACIOACBKC8ucBNQhNBAARAAARAAARAAARAAARAAMobvgMgAAIgAAIgAAIgAAIgAAIg4AIEoLy5wE1CE0EABEAABEAABEAABEAABEAAyhu+AyAAAiAAAiAAAiAAAiAAAiDgAgSgvLnATUITQQAEQAAEQAAEQAAEQAAEQADKG74DIAACIAACIAACIAACIAACIOACBKC8ucBNQhNBAARAAARAAARAAARAAARAAMobvgMgAAIgAAIgAAIgAAIgAAIg4AIEvF2gjWgiCIAACIAACICAHQgU7TpG5ccKTCV5EPmEBJBfZAiFZiSQb1iQRQ3Fe09QWVa+xTHeCRuURBFDk7Xj9RXVVH40nypzCqm2qJz8e4dRYEIUhaTFyfI5Y2NdAxVtO0pVZ4upvrSSfEIDyT8mjKKvHEDegX5aWQ3VtZT75Q5t39PXW7QvmALiIiikfzx5eIhGt0hjbT3lfL5N7Zq3IkvKvAly//zm
w1SVVyzTHl4e5B0cQIGJURQ2MIk8vfD+2gwNKRAAAVchAOXNVe4U2gkCIAACIAACl0mgtriCKk+fl8qQh4+XVLhKD5ym81sOU++rB1P0mP5aDXUtef1jwomVKE2am7VkfXk1nVy8gerLqoQyFk5+USFSgbt4OIeC+vSS+WqLy+n0x1tkHr9eYeQfG0H1F6uoaMcxKj2UQ8m3XCXaEynzNjc0yfZ5CYXOv1cocfnc3qZtDRSQEEmJN44lX6H4sTQ3teQN8CW/6FB5rPWfmgsXTf0V1zZVNVDFyXNUuv8UcZ9Tbr+aPDzNymDra7EPAiAAAs5IQDcaO2Pz0CYQAAEQAAEQAAF7E4ifMYr8WxQetpblfrWdzq0/IBSmMApO6W1RXfy0ERQgFK7W0lTfSKc/EUqZULCS5lxJoekJWhY+xlY9loK1+6XiFjdlGEWO7KflKT92ls4s+4Hyv95Dfe+eZGFVC07uJRU1zsxKWuH2bDq/8RDlr9xFKXdcrZXBicDEaOozd5zFsdY7ybf8iLz8faUF8OTi76hKWAn5EyTqgYAACICAKxGAz4Ar3S20FQRAAARAAATsTICtZQmzRstSz2082OnSq4ULZG1hGYULF0q94sYFKMWttqhMWrvYsqZX3DgPu1XydTXn2Tp2gQ9ZFQ9PT+o1boBwm4yjyjMXqOLUOav5OnPQS1gQVVvrK2s6cwnygAAIgIBTEYDy5lS3A40BARAAARAAAeMJsMXNNyKIai+UUbPOLZJbwopVWVae9mmoqpUNrD5bIrehQgmzJWzVY7Fl4dJcK4WS15GEpMXLLKzs6aVBzLnTt68yt1B/2iLN8+TKxPw8FrbuQbqXwNLPl3dvA1A7CLggAbhNuuBNQ5NBAARAAARAwN4EfEICqa6kkhoqajTLGddxboOlNS5l/jUyyEjN+VLZBJ9Qk3uktfbUl1XLw97B/tZOiwAipuMqn9VMLQdVPTxfTi+sROZ8YQ5cEiTcPoNaApaofGc++4HYzbPmXCl5eHtSnHAF9Q6y3iZ1DbaOJXAk8yh98NFSGpSRTgPFBwICINA5AlDeOscJuUAABEAABEDArQk0CDdCDuDRWqlJmjOO/ETwECXKJZLnkLE01tSrU2223kGmSJKN1XVtzvGBxhrTcX3ESasZxUFWKlm8W+bSyR3xh5W1uOuHq13y9PbS0irhISJLerYEJ4kc3pcih6WqU9h2E4GPPv1c1szb3z39y25qBaoFAdcjALdJ17tnaDEIgAAIgAAI2JUABxipK6kQrpPBbSIwssXLTxxXH6Uc+YSblhaoFtYsW+Lbkqe1q6PKr46zy2ZHwgFGWHiOnl48fcRyArr2KeVSnyfppisp9Y5rhPtmbyrec0JGxNSfR9pYAmx1O3DoiKyUt7wPAQEQ6BwBKG+d44RcIAACIAACIOCWBHgeGEd8bG5qpqgrO+++Fpou1l0T1iwO+a/mwbUG5B8TIZWt8ux8qi4wzZFTeerE8gIcsp8tfcEpMeqw1S1Hpiw5cEouCRDSN9Zqns4c5IiXYlIf5a/e3WZuX2euRx77EFBWN1Va6311HFsQAIG2BOA22ZYJjoAACIAACICAWxMo2XdShs6vEwtmV4rojQ2VtabFt4eYF99WAErEumjlJ1oW9hYHg0Rofg40wot6R41Np8KtWXT87bUy6iQfY0WuOr9Yhvr3FGvJxU4eRmc+3UInP9hIkcNTyVdYznih7pJ9p6hJLN4dN3WE5Tpyoo6awnK6IMplV062zrHVjdd+SxBLHLALpF7qxDpy5783WXH4uIf41+tHGfosWpqtdlFiLbtCsWB4yd6TFDmir3YOCWMI6K1uqkZlfcPcN0UEWxCwTQDKm202OAMCIAACIAACbkmgeNdxaTVj10deNDtKrL8WnGrd+sVKjl6aRdh+FSUy5prB5BsZTIXfZ1LhD1kym5e/j2nR7Jb1r3nduL53TaSz3+6jYlFWc0OjVMB4we6kSeOkMqgvn9O1YnHt
8+LDChu7XkYLi2C0qNfLz6d1VukCeWGzWXnjDLaUN3nuqgwqFYuIcyAWXq7Amptlm0pwwG4EbFnZMPfNbohRkJsT8BAhgZvdvI/oHgiAAAiAAAiAgIMJcDTHprr6NgFP9NXyTw4OPMKukuxyCelZBNjq9vRzf7TZ6T88+xtEnrRJBydAwETA0vcAVEAABEAABEAABEDgEgiwi2TrSJWti/Hw8JCWLihurcn0jH1bVjfV+47Oq3zYgkBPJgDlrSffffQdBEAABEAABEAABAwgYG2uW+tq1dy31sexDwIgYCYAt0kzC6RAAARAAARAAARAAAQcQKDg3HkqLCrWSn72989r6eee+ZWWjo6KpNiY3to+EiAAApYEELDEkgf2QAAEQAAEQAAEQAAE7EyAFTJbStmQQdajg9q5CSgOBNyCANwm3eI2ohMgAAIgAAIgAAIgAAIgAALuTgDKm7vfYfQPBEAABEAABEAABEAABEDALQjAbdItbiM6AQIgAAIg0CMJNDQRFVaKTxXRxRqiqjqxSFojkQjbT3yuSawGxCsC8RYCAs5K4IO9ztoytKunEuClTER0XOKtt7B1iWi65Cc+gb5EYf5E0YHiE2Q6ZzAjKG8GA0d1IAACIAACIHDZBHIvEmUXEhWUC+XssktDASAAAiAAAnoC8oWXGFzFezD5Mqy6vuWseFmmhJeqjA0h6h9NlBimjjp8C+XN4YhRAQiAAAiAAAjYiQBb2XbmEhVX26lAFAMCIAACIHBJBPjF2VnxAo0/kcISNyqBqJewxjlYoLw5GDCKBwEQAAEQAAG7ENh3lujQubZFRQSYfjDwj4cg4dLjLx7t7OLDrj5e4sNvh+WH/0BAwEkIfKVrx4Lhuh0kQcAJCEiXc9GORuF+zi7o7Ipe00BUKVzTi4Wb+gXxIq1E9xKNj32TTTQ4huiKWJPLpYO6AeXNQWBRLAiAAAiAAAjYhQD/iNh8iohdJZWwYpYuXHXSe4k5GD7qKLYgAAIgAAL2IMBz3Vi8xIswX/EhMc4qz8i+kfIUVQlXSnZfz7pgUvD4KL9gKxPzj8enmObLmXLa9a8Y/SEgAAIgAAIgAAJOSYCDjWw5Zam4xYcS3TiQaHg8FDenvGloFAiAQI8gwC/OhsWZxmMel5XkiBdt/MLNQQLlzUFgUSwIgAAIgAAIXDYBdpXkHwJK2CXnur5EAbC2KSTYggAIgEC3EuDxmMdlHp+VsKcEj98OEChvDoCKIkEABEAABEDgsgnwnIrD583F8A8DfssLAQEQAAEQcD4CPD4P6m1uF7tQcpApO4tHsxA7l4niQAAEQAAEQAAELpfA6qOmifFcDrvk8JvdLkpFRSUdyTxKx06cpPyzBVRcXELlFRVUWVVNdXV11FDfIObjN1GT+ODnQBfhIjsIgIBLEvAQ67d5enqKeE6e5O3jTb6+vhQYEEChIcEUFRlJCQlxNCA9jdLT+lFgoAgI1VX57gRRfpnpqkhx/fQBXS2h3fxQ3trFg5MgAAIgAAIg0A0E2OVm40lTxRychOe4ddJVkhWxHTv30Ko1a+nQ4UyxPjfe0XbDHUSVIAACLk7AUyh5gwdl0PSpk2js6JFS4etUlziQyfIj5iAm16TadR04KG+dugvIBAIgAAIgAAIGElh/3LR2EFfZBXfJQ0ey6M13FtOp0zkGNhZVgQAIgIB7E0juk0j33b2QhghlrlOyN9/s9h4nFvKe2K9Tl3UmE5S3zlBCHhAAARAAARAwigCvJ/TpASJlMJszuMOokuz6uHjJJ/TF8tVtWpmY1If6DxhICYlJFBkdRcHBIRQQEEi+fr4iCra3+HiRciNqczEOgAAIgICbEWAXcfZQaGxsFJ8Gqquto+rqKqqoKKeiCxcoN+cMHT+WTXli29qdfPas6bRowW3S5bJdLGx9+/yQKQuvOnDrUNP6m+1e1LmTUN46xwm5QAAEQAAEQMAYAjxXgudMsISL+RIz258vUSvmrv3lb6/Qrj37TNeI
v35+/nTdlOtpwjUTKSw8XDuOBAiAAAiAQOcIlF0spU0b1tOGtd9QTY1Yu61FRo0YRj9//GHyE3Pl2pVVWeaFvK8VrpMJaqG4dq/q8CSUtw4RIQMIgAAIgAAIGEjgQAERf1gGiEW4RyWY0lb+ssXt+f/7u4XiNnjoMJp/1z0UGmqfHwpWqsUhEAABEOgxBMrLymjJ+2/TwX17tT6PHjmcfvnzx9q3wO3KMy3gzVcNjTV9tBIuPYGlAi6dHa4EARAAARAAAfsTKKs1lxnhb05bSb3/wScWitv0WbPpgUd/BsXNCiscAgEQAIFLIRASGkr/8fBPicdXJTt375Wu6mrf6lY/fuvHdauZO38QylvnWSEnCIAACIAACDieQFWduY4gP3O6VeqgiCT55QrzHDf+YTFz9pxWubALAiAAAiBgDwI8vk6bdaNWFM8x5iBRNkU/fuvHdZsXdO4ElLfOcUIuEAABEAABEDCGQJ0IWKLE31ulLLY82Z6jSioZMmw4FDcFA1sQAAEQcBCBWbPnErumK+FxmMdjq6Ifv/XjutXMnT8I5a3zrJATBEAABEAABBxPQP+Q9/GyWt/2nbvp9Jlcec7f35/mL7rHaj4cBAEQAAEQsC8BnlPMQaFYeFkWXlfTqujHb/24bjVz5w9Cees8K+QEARAAARAAAccTaNC9xfWx/phevWad1o5rJ19PPCcDAgIgAAIg4HgCHAyKo/kqWbVmrUpabr1147d+XLfM1eU9XaldvhYXgAAIgAAIgAAI2JtAk1rgTRTs2fYxXVVVTYfEfDcWXp/t6msnyjT+gAAIgAAIGEOAl2Hh8ZeFx2Mel9uIl278tuVa2eaijg/oSu04M3KAAAiAAAiAAAg4mIBYQFYT028DbZcTWdnHqKklT2KfZAoNwzpuFoCwAwIgAAIOJsDrZyYk9ZG18Hh89NjxtjXqx2/dsN42Y9eOQHnrGi/kBgEQAAEQAAHHErCwvOmf/qZqj2abfyT07Zfm2LagdBAAARAAAasE+qX1145nHT2mpbWEfviG8qZhQQIEQAAEQAAEehSB3Lx8rb9seYOAAAiAAAgYTyCxxfLGNeflnW3bgBa3SnlC71HRNmeXjsDy1iVcyAwCIAACIAAC3UugqKhEa0BUdLSWRgIEQAAEQMA4AlG9emmVFRUXa2lHJ6C8OZowygcBEAABEAABOxIoKy/XSgsJQZRJDQYSIAACIGAggeDgEK22svIKLe3oBJQ3RxNG+SAAAiAAAiBgRwLVNTVaaf7+AVoaCRAAARAAAeMIBAQEapVVVVuJNqmdtW8Cypt9eaI0EAABEAABEHAogbq6Oq18Xz9fLY0ECIAACICAcQT0429drXlcdnQLoLw5mjDKBwEQAAEQAAE7Emiob9BK8/b20dJIgAAIgAAIGEfAy8tbq6yhwTwuawcdlIDy5iCwKBYEQAAEQAAEHEGgUbfYq6eVRbwdUSfKBAEQAAEQsCTg5eWlHdCPy9pBByWgvDkILIoFARAAARAAAUcQaILy5gisKBMEQAAEukRA//JMPy53qZBLyAzl7RKg4RIQAAEQAAEQ6C4Czbr1gjz06wh1V4NQb48jUFxcRBvXr6XcnDM9ru/c4crKCtn/49lHe2T/0WkTAf34qx+XHc3H7Kzp6JpQPgiAAAiAAAiAAAiAgN0IbFz3LR3Yv69NeePGT6BRY65sc9xeB86dzadPP1xMN90yj/QLFevL/8ff/qLf1dI/efhR8vX10/ZdMVFWelH2/9rJ11O//umu2AW02YUJwPLmwjcPTQcBEAABEAABEOi5BAoKCijryCGqqqqihsYG7dPc3NTtULhdp0+d0Nqk2qczHHd7G9EAEHBFArC8ueJdQ5tBAARAAARAAARAoIXAXffdTzGxcTZ5lJeVUVHhBYqK7kUhoW0Xduf5OoUXzlNtbY0oJ15YxtouQVF28SKVlhRTbHwC6d3FbFYqTiT1SaHHnnzKZhZVb41YuzA2Lq6NRa6kpIgC
A4PJz8+PysvLqPD8ednPGtHOsNAw8vI2/YytFsprdU01BQcHa2XU1tZK90b9MXZ3LBL9DA2LoPCICIt2VVdXUb0I9x4aHk7cLnYJDQ+PEHnDZL7KigrJKCY+njw8PSyuxQ4IGEkAypuRtFEXCIAACIAACIAACBhEgBWSxe+8Rfv37NJqvPKq8XT7orvJu0XxOX3yBL39+mtUVFQo8/j7+9O8BXfS6CuvkvuNjY308eJ36Yctm+S+j48vTbp+mlbepSayj2bS+2/+m0qEQsjiKSL3zbzxJrp++ixNOXz2108J18zbqLS0lNhFNDgkhKaK80s/XkL3P/AIXTFylLz2rX+9SpnC0nf99Bvoxrk3y2NrVi6nb1avoKd++6xQIpPpnX//k3bv3E5qblJ8QiLdesdCSksfIPOv/Xo1bdqwjh7/xa/p5Rf/QuVCWZ1/5z101YRr6KtlS2VZnJGDVEy/Yba8Bn9AoDsIQHnrDuqoEwRAAARAAARAAAQcTGDZpx9LxW3OrfNo6LARtHXLFqGELKdeMTE0dcYNwtJWS/965e/kKxSyBx97XChHobTskw+FwvcmJSWnSCvX9h+2SMWN57bNEMrVOeGquf7bry+r5WwJfP0ffycfYeHjeXO9e8fQ2m9W0/LPP6PIyChNceRKvv16lVS4Jk6ZSn3T+gsLXTyRUN6OZmVK5Y37kJ2dJaxkkZR5+ICmvGUeOSytjGpO3uArhtHwUaNEv1LpfMFZeuPVf9A60Q+lvHFdbMF76a9/pjih2M26cQ6lDxxERw4flIpbL9HGWTfNpcqKclq35vL6z3VBQOBSCUB5u1RyuA4EQAAEQAAEQAAEnIDA3//yvLQIqaY89NgTFNWrN20X1rLk1L7CUjZdnpo5+ybasnE97du9Sypve3btIFakbp53Bw0acoXMM2XaDPrnyy/Swf17pfK2Y+sP8vj8u+6VFqyhw4j8/P3okw/eV9XZ3J44nk3P/OpJ7XxiUjI98OjPaPvWLcSukmzBUm1LSk6m//r1L4RiuMZCeWN3xV8+/TuLwChRUdFCeTsiyz2aeZgaGxpo0tSp9NnHH8r+sFUx78wpGiWsh8rFc/TYcVo7WEFkxezQwf1SgWW3TCVp6Rl03wMPqV1a8cUymVYKMO+ER0TS66+8pOVBAgSMJADlzUjaqAsEQAAEQAAEQAAE7Eygf/8M8g8M1ErlNM9haxLRQfJzc+g/f/4z7VyNmBvGof5ZLpw7J7crv/qC1qxcIdNqvariIlOeYuFO6efnT/GJSfI8/4mLS9DS7SWCxBy0wUOHa1mie0XJ9PmWevulmSM1skLEc/LOC8ueXtjSpqxn6vjAwUNps1BCeR7e4YMHiK1iI0aOkcpbprCU+Yr2ct8HDRmqLqFt32+mXdu30QXBhee+1QmLHfe1ob5ezqlTGUeNtYzSWSzmCrKw1U9JnJj3BwGB7iIA5a27yKNeEAABEAABEAABELADgRnCotY6YMnZ6mpZch/hJnjtpMkWtXh5mX7+KavUhGsmUh9h+dILW+5YPMQcL+G3qD9F9fV1Fvu2dmJi4uiORXe1OR0QYFI061qVU99QTx5eloHQw8LC21yfMXiwVN5OnjguXCUPSSUtTAQgiRXBRNhdMlQEZeG+ZQwcLK9du2Y1fbH0YxqQMUi4Ps6hhIQ+9OWyT+nQgbbLLEREWNYn+8+l6BDU19W3aRMOgIBRBKC8GUUa9YAACIAACIAACICAQQTYGuUlgoAUFOTT0OEjZbp11XEJJgsSBzYZPmpM69NyP1pYwzhSZX7uGRk9kg9mZ2VZzdvZg8mpqTJrtpi31l+4KbJcOH+OykRgktS+aXK/vT/pGQNlgJPDwu2R2zZIWOJYBg4aSgf27aEIYcXrk5JKbPljYescy/y775Vz6hqFxS0n57Q81tEf7j8vxn3iWLbgOEJm52ArEBDoLgJQ3rqLPOoFARAAARAAARAAAQcR8PbxoSnTZtLXK7+iV178K10lFu4O
CgqmvLxcGVJ/3PirafiIUbRaLDHAQUmam5pp2MiRVFdXK+aTZdF1k6ZId8RxV19NWWJe2Vv/eo0mT5suQu1foO83b7ysVg8cPEQGBVknLGKVlZUUExNLG0Q0SZYp02d0WLa/fwD1FUre7h3bydvHm/pnmBRAtsh9t3aNcKcspclTzeX0Ey6P2WKO3NbNmyi1Xz/asW0rVZSXd1gPZxgnok1uE3w+WvKeiMh5gaqFRfP7TRs6dS0ygYAjCFjaph1RA8oEARAAARAAARAAARAwnAAHBJk5ew7liOAd7775Or360gsieuNKzQrH66Q98vjPacCgwUJB2Uyv/v0FeuO1V4Sik6mtoTZy1FiaNvNGEa6/mD56/13aK4Kd3P/goxYBUrraMVa+Hvnpk9Sv/wDaJoKqfPrhYhHApJoW3n2fjIrZmfIGDR0i16VLSxugre3GaS9vL6GA1hEriErGTbiaMgYNoTWrlstgLM3C8nbnvfer0+1uWfG79fYFVFtVLefUbRGK2yJxrbLqtXsxToKAAwh4iPUudF68DqgBRYIACIAACIAACHSewAd7zXkXmIM9qIO3LrhPW6vqxdfe0KLpqfPYgoA1AhzcgxeXZusbr1XWWng9t4vCYsXz0QICAlqfJj5fVVVJIWI5AXsKR4qsEm6b9i7XWht5KQAfYZFkq2RXhYObsLVOLdrd1euR3/0IsAr1swd/LDvGcyw//eDNtp3sYDxve0HHR+A22TEj5AABEAABEAABpyHAP7z5hzQL/6DkeU0QEOiIQEdKB3+POIS+LeHzjlCw2PrniHKt9SNAF5HT2vn2jvH/u44Ytnc9zrkfARWZlXtm7YWIo3rc9tWLo2pCuSAAAiAAAiAAApdNwEtnNdH/eLjsglEACIAACIBApwmol2h8gX5c7nQBl5gRytslgsNlIAACIAACINAdBDhAg5IGEVodAgIgAAIgYDyBxsYGrVL9uKwddFACypuDwKJYEAABEAABEHAEAV9fX63YutrOrbelXYAECIAACICAXQjox1/9uGyXwtspBMpbO3BwCgRAAARAAAScjUBQoDmYBK/PBQEBEAABEDCegH78DfD3N6wBZt8Lw6q8/Iq2n9xDR8+daFPQsKTBNDQhg4oqSmjVwXXivAfdOOx6CgsIsci7bM9qCvYLpOsHXWNxvODiBco+f4JOF+VRdX01JYTHUZ/IeMqITSNvLxOqkspS+v7ELjpfVkiVdVUUHRRJydGJdGXKCIvJiicunKbvj+/Uyg/0DaCo4Ejq1yuZEiPitOOcOF2US5uyt1kc453YsN40ZeDV8viHO76ghhbzrI+XD4UGBIt29afU6KQ21+EACIAACICA+xIIaVl4mHtYUdG5tarclwZ6BgIgAALdQ0A//oaFWuoajmyRS1reCiuK6UThGaFg1VJjU6P2aW5ukqxqG+rk+ROFp2n1wfVt+OUU59HZi+ctjp8U5b2xZYlUohqbG4XCF0rHLpyitZmbNaXsyNlsem3je7TnzEEZmjk2tDcVVZbQN4c3ims/pOq6Gq3MitpK2YaymgoZ0vlceSFtPrad3hT5vty3RrZZZa6srZJ5S6ouan3hfuknop8szKGckrPy/LmyC7Tj1D56b+un9F3WD6oYbEEABEAABHoAgcjICK2XxYVFWhoJEAABEAAB4wgUFRZqlUVFRmppRydc0vKmoNw0fCr1DolWu222of7BlHXuOGUVHKcBsf3anFcH2Ir20Y4vKcg3kBZeOZd6hZhD5ZYL5cvTw5NqhaK44sBaobR50j0/ulWznvEaD98e2UQ/CGvcRmE9mzb4WlWs3F7VdxSN6GNaKLKqrppW7P+W9uYcovDAMLqm/5UWecf1HUljUtqu6aMyRQVF0N0/mid3WYF9bcN7tOX4DlmOkSFKVXuwBQEQAAEQMJ5AfFysVmlebo6WRgIEQAAEQMA4ArlnTmuVJSRYetVpJxyQcEnLW2c5TEgbS37evsKFcj3VtROR69DZ
o1TXWC9cFCdYKG5cT4hQAFn25x0hVr7GCuVK7/bIi/JNyhgv8gUJa9heqtdFnpEX6v6w6+RNw6cLl80gaeGrF3VeqkQLF0xuB1voquvNFr9LLQ/XgQAIgAAIuAaBtL6pWkOzs45oaSRAAARAAASMI3Di+DGtsvT+to1EWiY7JVxaeeN5ZYeFK6P66N0WmY+fjx9NHDCeymrKacNR2+6F+aUFEmd6jG3wbOliSbEyx8zL04uSIuKpSbhtllSVyny2/vh6+4h5an2k0qXKVHnZlVP1hbc8d8+WXCgvEm6U+dLyGCTm70GMJ7D08+XGV4oaQQAEejyBgRnpwiPEQ3LIzTlDF0vbf+70eGAAAAIgAAJ2JlB2sZSU5Y3H4wH90+xcg+3iXNptco2Ya6aXH4+fTwm+ZncSDvAxJmUY7cs9TFtP7qYrEgdSTGgv/SUyXSDmkPkLRY8VK1tysdo0KTxEWM2sCVvTWDhfe66cnEcFUCmtKqO4sBg+JIXdKfmjZHLGBBqfNkbtUrGYX/fuD5+KOsqEknhRzsu74Yop2nkkjCNwJPMoffDRUhokfkTxDykICIAACBhFIDg4iAYPyqADh0xWt80b19Os2XONqh71gAAIgECPJ7Bpw3oZ04JB8HgcqIsC7Gg4Lq28zR87h3gemBKe46aXZmqWgUVmDZ1M/978gZhvtpbuHX+7FoBE5Q3w8ZeBR9hyxvPbrImybtlyUVTHed5cR8LBTFhCW0XBvKb/OKlgqusDfVuFHRWavbew8vm0RL6cOWSShQunug5bxxP46NPPZSW8/d3Tv3R8hagBBEAABHQEZkydrClv3337DV193SQKDQ3T5UASBEAABEDAEQTKy8pow9pvtKKnT52kpY1IWNdUjKjZDnWwBSsyKFz7qHD+rYuOD4+RgUByS8/S7jMHpAKkzxMhgodw4BEOXGJLIgPD5SleTsCacARIFm5Pe8IRJHOK82WWaJ3iyQeC/AK0vnA5/kKp1Au3YYEIqHL3VfOIFc5vjmyU7pf6PEg7ngBb3dQbb97yPgQEQAAEjCQwZvQISkk2LRVTW1tDS95928jqURcIgAAI9FgCS95/m2pqTPEmkvsk0tjRIw1l4dLKW1dITRrwIxkohEP/tw4qMiR+gCyKw+7rw/Pryx8Un048t43dL1vPreP5aefFHLQBYs4cu1/aErbsrc/6Xlr5RiVfIefk2crb3vEAYZGbLIKr8Jw5a+vDtXctzl0+AWV1UyW13lfHsQUBEAABRxHgCMP33bVAK/7QgX208kuTR4B2EAkQAAEQAAG7EuBx9uC+vVqZ9929sI1Hn3bSQQmXdpvcdXq/DO+v2CRHJRJ/rAkHL+Ew/kt3r6QaEfaf13FTMkQs7L3j9D46KhbofvP7D2mgWPyaI0OWinllNQ21NEO4J7J1bny/MWI5gK30+qbFwr1xkIwwyUFGeN03P28/bUFtVS5vs8+fpHLhJlkm5sKdEevLscLF68NNEfPZWsux86csFENeToDn6VmTEUlDpBVx87EdNFgon/rlDazlxzH7ENBb3VSJyvqGuW+KCLYgAAJGEOB5FjfdMJ2+WL5aVrd6xZdyO3P2HCOqRx0gAAIg0KMIsOKmxlnu+OxZ02mIGIeNFpdW3nihar1cJ6xrtpQ3zsdKDgcEOS6iVOqFw/3fNe5WaRU7kJcpF+bm8zyHTh+B8roBVwklKVJErtwqLV48p46XIkiP6SsUvIlt5rBxGZkFx+TH5OIZQVemjqCRfYbKuXh8Xi+s6PFHSWpUkk3ljdvMc97+vXkJfbX/G7r3R7dbLVOVha19CNiysmHum334ohQQAIGuEVg4/zbKyy+gnbtNb4L5h0Vu7hmav+geCgk1v6TsWqnIDQIgAAIgoAiUlV2Uruns4aBk1IhhtGjBbWrX0K2HmOvVbGiNLlAZu0WySworZrZErq8m8gWL9d0gPYMAW92efu6PNjv7h2d/g8iTNungBAiAQKcJfGB2yaEFwzu8
rLaujv7yt1do1x7zDwt/f3+6dvL1dPW14sViWHiHZSADCIAACICAJQFehoWj+XJQKJ5brGT0yOH05M8eIj9f23qCyktdHM+169pJQHlrBw5OgYCewO/+8L9aoBL9cZUeOnggIk8qGNiCAAhcOoFLeNg3imBY73/wCX25wuRCqSpnL43EPsnUt1+a3EZFR1NwcAgFBASSr58veYslcvhlJX84LwQEQAAE3J0A2604xgV/Ghrqqa62jqqrq6iiopyKC4soLzeHsrOOEK+j2VrYVZ09HrzEmNkpuYTxvKNyXdptsqPO4TwI2IuAtblurcvG3LfWRLAPAiBgFAH+IXH3ottp1Mhh9OY7i+n0mVxZNf9IyTl9Sn6MagvqAQEQAAF3I8DRfTk4yeCBpiCH3dk/WN66kz7qdhkCBefOU2FRsdbeZ3//vJZ+7plfaenoqEiKjemt7SMBAiAAAl0mcJlvavlt8vadu2n1mnV06HAmNWF2RJdvAS4AARAAAU/hjcCBoXhdTV6ehT0UuiyXOZ5bqw+WN2tUcAwEWhFghcyWUtYdkYZaNQ+7IAACIKAR4B8Y48aOlp+qqmrKyj5GR7OPU17eWSoqLqaLZeVULdYoqhNz5RrqG8R6oSb3IUyB1xAiAQIg4MYE2EWcx0n2WPD28SZfMXctKDCAQoKDKTIyguLjYimtb6qMYxAc7HyxLaC8ufGXE10DARAAARDo2QQCxQ+SEcOGyk/PJoHeOyOBpZ8vp1vm3OCMTUObQMBpCXS78lYr1lzj9c94vbSiymKxHloFVdfXiLXYaqi+QbwRbG4ULh9NxG8E5cdpUaJhPZXA75e/0FO7jn47GQEVboLfKHp6iLeKHl7k4+0t16HktStDA4IpOjiSEiPixSeOfLx8nKwHaA4IgEBPIcBzyT/4aCkNykhHpOaectPRT7sQ6Dbl7WjBcdopFsY+WXgG/vh2uZUoBARAoKcTUOu+sBuceO1F9dRANQ21VE6VbdCwL39qdB8anTLMYj3LNhlxAARAAAQcQECtm4p1Uh0AF0W6NQHDlbfcknxafXC9tLS5NVl0DgRAAAScmAAHsTh+4bT8xIX1pmmDr6OkyAQnbjGaBgIg4C4E9BGcEanZXe4q+mEUAUOVt/WZW2jLse2k3g5zJ9nNJya0FyVHJcpteGAYBfkFSDcfX7H+DLv9sPsPTy7EGjRGfS1QT0cEbll8r5blmRue0NJIgIAzEJDr1wh384amBqpvbCB2T6+sq6bSqovixdk5yinOp3NlF7SxmN3W3/n+YxqfNpauG/AjjLXOcBPRBhBwYwLK6qa6COubIoEtCHRMwBDlrbGpkT7bvZIyC45pLfIVcy3GpAynManDKcQ/WDuOBAiAAAiAwOURkHPeSETR8vImfzGtjcfYaFEkvyQbljRYFl5eU0E7T+2j7Sf3UF1jvVTkNouXa4UVxXTzyJkiCpfX5TUCV4MACICAFQJ6q5s6DeubIoEtCHRM4BIWLOi4UH0ODjLSWnFL651Cj0y6lyYNnADFTQ8LaRAAARAwiAArdBMzxsuxuH/vVK1WfsnGYzYEBEAABBxBoLXVTdVh67g6jy0IgICJgMOVN3aV1FvcJgi3nPlj51Kwn/Otm4AvBQiAAAj0NAI8Ft8xdg7x2KyEx2weuyEgAAIgYE8C1qxuqnxlfVP72IIACFgn4FDlLUcsAfD98R1azfzjgN/0QkAABEAABJyLAI/N4/uN0RrF85M5wBQEBEAABOxFoCPrWkfn7dUOlAMCrkzAocrb14e+0ybEs1sOFDdX/qqg7SAAAu5OgF3Z2a2dhQNLcWRgCAiAAAjYg0B7VjdVPqxvigS2IGCbgMMClhw9d1xbDoCDk9xwxfW2W2HjTGVVFWVmZtOxEycpL/8sFRWXUHl5BVVVV1NtbS01NDRSY2MjNYs1jTjsNQQEuoPALfPNkSe7o37U2fMI8BptHmIhbi8vL/L29iI/Pz8KDAigkBARmCQqkuLjYimt
XyoNHJBOgYEBXQLEY/Ur69+WQUw4CiWvyZke269LZSAzCIAACLQmEBERTs898yvt8LO/f15L649zPggIgIBtAg5T3jiKmZKxqSMo2L9zc9w4wMnO3Xtp9Zp1tP/gYeKQ1xAQAAEQAAEzAfmySry44pdXdXVEVVXVVFJSas7QkmLlbsigDJoxdTKNHjW8U0sAcCATjgS8pcXlfefpfVDe2pDFARAAga4SiI3pTfyxJjxOQUAABDpHwCHKG68pdLLwjGwBr+M2OmVYp1qTeTSb3nh7MZ04ebpT+ZEJBEAABEDANgFW7vYdOCQ/qSnJ9OO7F9DAjHTbF7Sc4SVceL4y+zPwWF4vlhLwER4UENcjwB4s7K52/MQpeLC43u3rMS2GB0uPudVO0dH2vFeiIiMoIT6O0vqmUkZGfwoKDHSKNusb4RDlLUdMcldujL1DoztcDoCtax9+sow++2IFseVNLwmJSdR/QAbxNiq6FwWHhJC/v1jE299PuAx5S7chXryb1zWCgAAIgEBPIMBjJo+VrJw1NDRQnXAjr6mppvKyMiouKqTcnDOUnZVJ+Xm5Go6Tp07TM//9J5o7exbNnze33TGTrW8xob2oQCzkzWN5bslZSo3uo5WFhHMTkB4su/bSqjVriecQwYPFue8XWgcCIGAsgc56r7BuMXTwQJo+dRKNHjm83eemkT1wiPKWX1qg9SElKklLW0vU1dXTCy+/Rtt37NZO+/r60rWTptDV102i8IhI7TgSIAACIAACpD1A2C2Sx8tA+WYwgmLj4iWeK1sglZaW0Kbv1tGGdd9KBY9/1H/2xXLKzc+nJx97kHx8bFvTkiLjpfLGRXHUSShvLVCdfHMkK5vefAceLE5+m9A8EAABFyDAL77M3it9hPfKwk55rzi6aw5R3ooqS7R289tbW8JQWituAwcPpfl33iOUtghbl+E4CIAACIBAJwiEh0fQjXNuoWuum0xL3nuLDh88IK/il2UvvPRP+sXjD2uKYOvi4sJitEOFFcVaGgnnJMDP0yUff0bLvlzZxoMlJTGWhqQnU3JiDMVERVBYaBAF+PuSv58veYsXAF5enuTpIT6ePNEBAgIgAALuTaCpqVl4lTQJ75UmGfywRkwer66po9KyCjpfVEqncgro4NHTdDrvnAbi5KkzLd4rM+mOeTeTVzd6/DlEeSurrtA6Gx4YpqVbJ/hBo7e4XT99Jt0499bW2bAPAiAAAiBwGQTCwsPpwceeoK+WfUrfrF4pS9q2Y5d0V19w+y1WS9aP3fox3WpmHOxWAtY8WFgxm3HdGPEZS1HhId3aPlQOAiAAAs5EgF9UiXjN8uWVn6+PmNfmL5uXGBttauZVpk1RaTl9vWEHrVi/nWpq6+SLMZ7ilZuXT0889pDwfLHtveLI/jpkolh1fY3W5iA/62GqeQI1vyFUAsVNkcAWBEAABBxDgF+OTZk2UyucH0IcKMqa6Mfuqrpqa1lwzAkINLIHy0uvWrwIHTE4jV763SO0aM5kKG5OcI/QBBAAAdckwC++Ftw0SY6nI4f01zqxfece6TnYXfOJHaK81eiUNz9vP62zKsGdfeOdDzTXjkFDhsLipuBgCwIgAAIOJDD75lspY9AQWQPPgeMIv7xtLfqxu7ahtvVp7DsJgSUfLSX+IaHk5ukT6OlHF1AkrG0KCbYgAAIgcFkEeDz97SPzicdXJew5uOTjZWrX0K1DlLd6Ef1Mia93W5Pirt37iCOfsfiKxWXn33mvyo4tCIAACICAgwksuOteGeiEq+GlWXhtzdbiI6L5KtGP6eoYtt1PgD1YPv9qldYQ/mGxULwlhoAACIAACNifAI+vc6eN1wpe9uUK4iBRRotDlLfG5katH14eXlpaJTh8sRKOKsnzMSAgAAIgAALGEOCAUDz2Klm9Zp1KaltvT7Pyph/TtQxIdCsBkweL2WrKLj1Q3Lr1lqByEACBHkCA3dHZNZ2FvVY4uq817xVHonCI8sYRXJRwBCu9
VFdXy3Vn1DFeDgACAiAAAiBgLAH92Lv/4GHixZz1oh+79WO6Pg/S3UeAraUc/YyFg5M8vOjG7msMagYBEACBHkTgoUU3yHGXuyy9V8S6mkaKpWZlp5r1E/haL5599NgJbcHQxKQ+xKGsISAAAiAAAsYS4DU0ExJN63DymJ2Zaen64eFhDhuvH9ONbSVqs0VAby2dNXEsRYQF28qK4yAAAiAAAnYkEBUeKqP5qiL1HoXqmCO3DlHe2mtw1tFj2unUfiazo3YACRAAARAAAcMIpKUP0Oo6duKkluaEXnmzOIGdbifAVtIDh45o7Zh27RgtjQQIgAAIgIDjCfAyLEp4PG7tvaLOOWLrEOWtbdwyc9N5bQQlfZJTVBJbEAABEAABgwmw94OSvPyzKtlm296Y3iYzDjicAFtJlTU0OSEGywE4nDgqAAEQAAFLAryMQEpirDzI4zEHkDJKHKK8tdf4ouIS7XRUdC8tjQQIgAAIgICxBPRjsH5sNrYVqK2rBPRW0iHpyV29HPlBAARAAATsQEA//h4/ccoOJXauCMOVt7Kycq1lwSEhWhoJEAABEAABYwnox+Dy8gpjK0dtl0xAbyVNSTK9+b3kwnAhCIAACIDAJRFITozRrtOPy9pBByUMV96qRLRJJf7+ASqJLQiAAAiAgMEE9GOwfmw2uBmorosE9FbSmCgE/eoiPmQHARAAAbsQ0I+/+nHZLoW3U4jhyltdXZ3WHD9/Py2NBAiAAAiAgLEEfP3MY3Btba2xlaO2Syagt5KGhQZdcjm4EARAAARA4NIJ6Mdf/bh86SV27krDlbeG+gatZV5e5kVgtYNIgAAIgAAIGELA29s8Bjc0NBpSJyq5fAJ6K2mAv+/lF4gSQAAEQAAEukxAP/7qx+UuF9TFCwxX3hpFRBYlXl5eKoktCIAACICAwQT063A2NkJ5Mxj/JVent5LyAt0QEAABEAAB4wn4+5rHX/247OiWGK68qfDG3DH9DwdHdxTlgwAIgAAIWBLQj8HNuhdrlrmw52wE9B4s3ngJ6my3B+0BARDoIQS8vc1GKCO9VwxX3pqbzSsGYRHYHvLtdrJuFhcX0cb1ayk354yTtcyY5lRWVsj+H882bk0SY3qGWrpKQK+8NenG5q6Wg/zGErD0YDH8MW5sZ1EbCIAACDgpAS9P8/hrpPeKecKDk4JBs9yXwMZ139KB/fvadHDc+Ak0asyVbY7b68C5s/n06YeL6aZb5pF+kWJ9+f/421/0u1r6Jw8/Sr6+5iAP2gkXSpSVXpT9v3by9dSvf7oLtRxNBQEQYAJ6Dxb9jwfQAQEQAAEQMI6A/gWokd4rZpXRuL6iJhCQBAoKCijryCGqqqqihsYG7dPcbJ4X2V2ouF2nT53Q2qTaB+NEd90R1AsCIKAIwINFkXDfbWNjE636bgftPpTtvp3sRM9yCy5IDmfPF3cid8/JUnChxOIlTs/puXP11NPTQ2uQkd4rsLxp2JHoLgJ33Xc/xcTG2ay+vKyMigovUFR0LwoJDW2Tj99CF144T7W1NaKceGEZM08gVZnLLl6k0pJiio1PoM666yb1SaHHnnxKFdFmq+qtqamh2Li4Nha5kpIiCgwMJj8Rjr28vIwKz5+X/awR7QwLDSOvlkh/1UJ5ra6ppuDgYK0MnvjK7o36Y7xfJPoZGhZB4RGWaztVV1dRfW0dhYaHywGdXULDwyNE3jDZ7sqKCskoJj6ePHSDTZtO4QAIgAAI9BACK9dvpx0H2rqPT7xqGF0zZmi3UqhvaKB/f7SKxg4bQCMH93dIWz5avoEyT+TIsj09PKhXZBjFx0TJvoeHBTukzq4Wmnk8V3J44sc3U1zvyK5e3un8327eTWu/30uD+vehO+dO6fR13ZFx7+Hj9PuXFtM9t06lGyeP644moM5uJgDlrZtvAKq3TYAVksXvvEX79+zSMl151Xi6fdHdpEKcnz55gt5+/TUqKiqUefz9/Wnegjtp9JVXyX32Qf548bv0
w5ZNct/Hx5cmXT9NK+9SE9lHM+n9N/9NJUIhZPEUQQNm3ngTXT99lqYcPvvrp4Rr5m1UWlpK7CIaHBJCU8X5pR8vofsfeISuGDlKXvvWv16lTGHpu376DXTj3JvlsTUrl9M3q1fQU799lpL6JNM7//4n7d65ndQb9/iERLr1joWUlj5A5l/79WratGEdPf6LX9PLL/6FyoWyOv/Oe+iqCdfQV8uWyrI4I5v4p98wW16DPyAAAiDQkwnkFhTS/iMnqF9yHPnpXvo1NZnn5rszn1O5BbL/g9L6kHhwESsF3wgl5pOVG+m+26bRxKuGu3P3Lfr21dptdPZCEZ3KKaCbp0+goAB/i/PM5Oy5IvrpvXMtjr/+4Ury9fGmu2+ZanHcXjtLV2+iM3nn6Ykf36IVmZIQQ9OuHkVXZKRqx5DoWQSgvPWs++1SvV326cdScZtz6zwaOmwEbd2yRSghy6lXTAxNnXGDsLTV0r9e+bsYOH3pwcceF8pRKC375EOh8L1JSckp0sq1/YctUnHjuW0zhHJ1Trhqrv/268viwJbA1//xd/IRD3ueN9e7dwyt/WY1Lf/8M4qMjNIUR67k269XSYVr4pSp1Detv7DQxRMJ5e1oVqZU3rgP2dlZwkoWSZmHD2jKW+aRw9LKqObkDb5iGA0fNUr0K5XOF5ylN179B60T/VDKG9fFFryX/vpnihOK3awb51D6wEF05PBBqbj1Em2cddNcqqwop3VrLq//XBcEBEAABNyFwKN3zaE+8b3adKdGeDNUVFVTdEQYsRtjztnzwjoVTkGBlj/s1YUXyyrpXGGJtBCFBAeqw3JbL9a4zRM//vkFWoKwbnl5tZ21UlVdS3lCoWQLk4+v7Z9nxaXCk6O4jOJiIikkyFwPR7srKaugiNBg8YLTiwouFFOdqLdPfG+LtrTe+dWDt1NwUIA8fOxUPv31jU/plfe/otSkWEpJjLXIXnKxnC4UXbTaR87IHinnCkuFN0kNJcXHkI8uGh+fL6+sEkpQMUVFhMoPH2stF4ovUnlFlbi+N7FF0Jpw0J78c4XyviTERJOPUKCUVFbVUJ2wXDIHznfyTIG4h6Fky5p47FQesXsmW7G+WruVvt95mK6/eqQqTjJc/8Neiu0VSdw2FrZS8v3atOOgsIymyePsQhcVbvYOYva54jvjL7xvYnpFkH5+6sXyStk3/p5UVtfQOeEGmZwYY5GHvzPrf9gnOenrDRTfv7lCwQxu9T3kl7vni0qprLyKEuOiKcDfcn5+kfjeBIpjfLz0YgWViXvR0XdDg4CEUxEwf9udqlloTE8i8Pe/PG+xbMRDjz1BUb1603ZhLUtO7SssZdMljpmzb6ItG9fTvt27pPK2Z9cOYkXq5nl30KAhV8g8U6bNoH++/CId3L9XKm87tv4gj8+/615pwRo6jMhPDFyffPB+h4hPHM+mZ371pJYvMSmZHnj0Z7R96xZiV0m2YKm2JSUn03/9+hdCMVxjobyxu+Ivn/6dRWCUqKhoobwdkeUezTxMjeIhM2nqVPrs4w9lf9iqmHfmFI0S1kPl4jl6rNk1ghVEVswOHdwvFVh2y1SSlp5B9z3wkNqlFV8sk2mlAPNOeEQkvf7KS1oeJEAABEAABNoS2LLzkFRifnrPHHrz46+lIuctlK5hA/vRz+6bq1lnWGH6xztf0p7Dx7RCfjRqEP38/lvl/ppNu+jtT9dQbV293GdF6eFFN9KVwzO0/Ks37KA3Pl4tlB+T1e+2mdcIhcQchpwz8g/uF99eRvszT8rr+Pkwc+JYule4z3E65+wF+sX//Iv+85H5xC6hbEkbMSiNnn5sgVZPR4m0lHh6cOGN9NyL79GSr76j3zx0h7yElY0X31pG+4SlUsmM68ZIC50K2sDtevmdz6motFxmYVaPCXYTRg8Rypx42bpkJW3cfkBdTgNSkwTHORQTbZoGwMryC29+Rjv3m1xZmdMkK9a/A6Kev4m2lAruLCHBAfTInbNpzBUm
T5TPvt5C677fQ8/+bBH9t3AvZKX6sbtvouvGiR8AVmT91n1Skbp52gTavi+Lvtu6V1PemPkv//S67BMrpQ/+9kVZwovPPkzP/OVtYkWRFTj+hIUG0ZvP/1ye53v+lvjOsBLJwgr7k+L7kCIUNBZuf21NPQ1OT6bPvt4sj/H1s8T9vGX61cS8f/nH16mwpIx4vp+q9+OXn6ZDR0/RH17+wMJt8vjps+L+fCZfEHBh/H2YPWUcLbxpsvai4ClR3kjxfagR38Mfdh+WdcZEh9P82ZPo6jFD5D7+uAYBKG+ucZ/cupX9+2eQf6D57SGneQ4bT/7Mz82h//z5z7T+14i5YRzqn+XCuXNyu/KrL2jNyhUyraKwFReZ8hQLd0o/P3+KT0yS5/lPXFyClm4vESTmoA0eanYbie4VJbOfb6m3X5o5UiMrRDwn77yw7OmFLW3KeqaODxw8lDYLJZTn4R0+eIDYKjZi5BipvGUKS5mvaC/3fdAQ85yLbd9vpl3bt9EFwYXnvtUJix33taG+Xs6pU2WPGmsZpbNYzBVkYaufkjgx7w8CAiAAAiBgIrDvyHFp8VI8hg5I0SxRfOyfH6yQP6qHCwvLN+JHOf9QZwXj2itNLw1fW7xcKm78A5hdDc+LH/m9o8JkcfxDm69PiutFN8+YQHV1DbRM/Fj/y78/pb8+/SAlxkYLq0+hmNe1msJCgmj+jddJK9IX33xP9fWNqkly++oHy+mgKO+hhTeIuVnJtHzdNlohPmz9NgxQAAA9PklEQVQ9mTJ+hJb3JaFABYopBAtumkQZ/czPPi1DBwl2x/P39aHTuaZnLGfnPrDS9MCCWTQkPUUqhxxQpY/o19RrRksF6fnXPpIWvztEH7jeoyfz6IoBfWVt7y1bKxW38aMG05QJI4VieYxWrd8hOTz/q/ulsvHltz9Irty3m6eNp6wTuaKPWy1ay0rN8//8SFpDHxcKNFvc/v3havrbG5/RS889QpEtlq8yYbl77sX3KTmhN024caJQklIsylE79cJauXnHIRoi7nloSCCNF0o3K3+sMLEFlK2sP//JrfSff36LBot23SvcSVnYkvfIXbPpj698SONGDqRbhcLlJaZPsBw5niN5sUVu4ZxJ0lLJ9+4F0ca/Pv2ApkxlncyRLwQeEop8mKj7vc++lS6rMydeSYHCbfOp/7iNfvX8GzSwXx/68e3TZdnWLLa1dXX0p1c/FEpZHd1+w7XUPyVBfje++OYHUW4w3XS9aRoJF8CKKrvJ/lKUXS2UOFYwP1mxEcqbpOs6f6C8uc69ctuWzhAWtdYBS85WV8v+9hFugtdOmmzRdy8v09dWWaUmXDOR+gjLl17YcsfiwWtwCEVIL/X1dfpdm+mYmDi6Y9Fdbc4HBJgUzbpW5dQ31JNHK1eYsLDwNtdnDB4slbeTJ44LV8lDUkkLEwFIYkUwEXaXDBVBWbhvGQMHy2vXrllNXyz9mAZkDBKuj3MoIaEPfbnsUzp0YF+bsiMiLOuT/edcOgT1LW9/21yMAyAAAiDQAwmwVUwvf/rlj6l/qvklF7vQLZxjeg7FCisRK2/b92VK5a1IWEZYkWMF6vH7THOW9WWt/G673H1w0Q2U0dekSAUJ74//E8rbKnHuJ3fMpE3CGsUub7cIVzhWbFjYQvPkH/4p0/yH3ea4ntFXpGt5Fok2rd2ym7buPmKhvNULBfGvf3hAuOv5atd3NcFujflCgeGXhGzh2rY3U7oHThVzrVgWzp1M336/m34Qx1l5+27bPmLL2YLpE6XliPMMHWCak8UK0jebd1FkWIiwPt3Cp+R8rfPC/ZItQIePnZGK0YZt++W5XwhliRXZEUL5YXdVZZnik98J5aO6pk7OS1Pl3zBpLL30zhe0V1gF9ZY6VkL1c8Vk4a3+MFN2jR0vrIMsvGXljeuZP3uiVA6VtYwVKnYlVcKKNwu7ruqPrxJWT5a7hUWU87Dr6b7MEzJqZp5w9dS7KrJFUH3X8oVb7btC
gdt78BhdJZTI5BaX1YAAX4vyZeG6P1v3ZFKxcGdlJW3erGvlGRl45cn/peXCDVSvvLE77a8fvkOzGu8X1tkN4vvHdXOwGohrEIDy5hr3qce1kq1R/BaroCCfhg4fqb3R0oOISzA9XDmwyfBRY/SntHS0sIZxpMr83DPCbTJFHs/OytLOX0oiOdX0QMoW89b6CzdFlgvnz1GZCEyS2jetwyLTMwbKACeHhdsjt22QsMSxDBw0lA7s20MRworXJyWV2PLHwtY5lvl33yvn1LEPf07OaXmsoz/cf16M+8SxbMHR9GaWg61AQAAEQAAETAR+++h8imvxrOAjbFXRS58485wxVmrYHZDnFbHknzd5ebD7mzXJF/O72P2xX5947fSglrw8v42F58mx6K1kibGWc/D4xzXL/iMn6d6n/k+m+U+jcLNU86HUwTEiQqVecdu25wh9KX7EK5knXDKHDeqndq1u2RU0MixYTmlgJY7l4NHTlnU3NAmrUqk8x5YqFmsWLu4fu4MO5MAoOmFLFitvzGGgsNRxP9jaxYqbkqSEVhxaeHMkzrc+Mc3fbhCByVhUW9S1V481e6+oY623rKSx8HxGDkrC4uvjQ6xIsgVRvSSWJzr5R30n2K1SSV3LS1OeL6hX3vro+scvBlguirloXRH13WALnRIOwJPWJ4HYulctlOqAFkU+KjxEU9w4L8/FY7koLJVQ3iQKl/gD5c0lblPPa6S3GDynTJtJX6/8il558a90lVi4OygomPLycmX4/HHjr6bhI0bRarHEAAclaRYPhmEjRwqXlFoxnyyLrps0Rbojjrv6asoS88re+tdrNHnadBFq/wJ9v9k0QF8q1YGDh8igIOuERayyspJiYmJpw7pvZXFTps/osFh//wDqK5S83Tu2k7dw+eifYVIA2SL33do1wp2ylCZPNZfTT7g8Zos5cls3b6LUfv1ox7atVFFe3mE9nGGciDa5TfD5aMl7IiLnBaoWFs3vN23o1LXIBAIgAAI9gUC08FhoLwy9n5+PTQzqxz1biKxJYICftB6xgqGCd/AyACzKzU7NGdO7SNQJl3hVNudVgTvYpXNiq7lb/v6WFjZWMPXC86i5HUq8WgURUcfV9pRwl+RgHGnC/Y5FtYOVrck/Mk8l4HN+/i1shLcIS6OwsrWWoJa61Zw/dZ4DerB4ixe1XIeqR53nLbuZ6sXDQ3jTCGFrUrxO4eZjCcKFUy9s6WtPeC6cWkdvuYg2qRdWJA9mnaKhlxDRkVHw/WIXS58WV0pVdmqSac6b2tdHOVXHurpliyBLrfjO6EXNt9OvRaZX6vV5kXYtAlDeXOt+9ajWckAQfsis++Zrqbxw5wODguiWefMlB14n7ZHHfy6CN74vFJTNQikzKSW8ltvkqdNlnpGjxlJBXr6IBrmKPnr/XYoW7pT3P/govfzCn+X5S/nDytcjP32SFr/7Fm3bskk8XOpkZMiFd98no2J2psxBQ4fQMRFlkl0jfX1ND9W0tAGyv1weK4hKxk24mk6dPEFrVi2Xh0YIK+Od994vlw9QeWxtWfG79fYFcrkADojC68Dxtby8AgQEQAAEQODyCCilj+eiWROef5Qp5kBlCtdAdgNkOSQsWCw8D46Fg0awHDmWQ31bLHSHs89oS8PwucSWaJhs1WGXuq7IqKH9iT+dEZ5TxnP4WOZMuUpu2fWPFav884U2644TkRhZ2DrX2voWIZQotvgcPnZaumEqZfVwtomDKj8mKlwG3GClioN3sPCcQb2oqKCs1HWVg74cTm/ccUBaBGVQFTEXT8lJsVzAr//3DTk/jJU3T08vqYyxQq0XFeFSWdXUuT4iyuYJEeHSXyj9Y4eZXs6qc13ZsgLIipdScm1d2z/VZNXl7xUHh2Fh5ZuXguBgMH7iZTjEvQhAeXOv++lSvZm3YJFYk22RzTbzW8nps2bLDwf34MWl2fqmBn6+kAOF/MfDPxVvNhvporBY8Xy0gIAArUx+4HCIfFYE
q6oqKSTE9Ebyb6/+W8tjLfH3f75p7bB2jJWgh376hIwUWSXcNlW5WgaRaK+MKdNmCcviLH12ERrah/768r8sjvEOR5d8+GdPyqUAfMQgzFZJllFjzMFJbphzM/HHmlwjrJATrpskrXVq0e4//uXv1rLiGAiAAAj0OAJrNu7UlAXuPLtADkqz7gbZGg6HhueokTwn7NkX3pXz4NgFj59TvNjzlAkjaJ0IM//yu1/KyJAc/n2ViCzpK14+zppkGsM5yMmnqzbRRys2UJWIyugnngUcrVAvHPb+ejEfjtdh+59/LJGRE9kieOLMWTGvqhddJYJmXKpweHwWDle/WwQS4QiKs4Xiplwr2Y2R57p9LTj9PxHlkKM2srWPlZz43lHEkTXZGvjJyg30uZgvxnPk+iXHS8Xr6rFD5CLjN13/I3pTuDn+9v/elvPzOGrl7oPZcq5bet9EWf/k8SPFnK9vRITI9yWb46fzabvgqhdmxXVwsBZmNXpIfxFqv1ZE4Dwh56jpXS7111lLcxh+dpEcJ+4fzwVTwnPQODrkVuFu+pP5M6XLIbt8svLJSwbwS2W+53zvOR9b73geJE9puE4EsZk77Udi/4AIpLJKKFDnZMARDtPPAVg44EtnhYOTcHAYnhPI3yFu45XCJba19E9JlEFIOMImW3jTxf6qDaZ1YedOHd86O/bdgIDhyhv/mFYLDfPWmpncDbiiC3YmoJQOW8WyosdKji3h89YULFv5O3ucrX+OKNda/QG6iJzWzrd3jH9IdMSwvetxzj0JqOis3DuMxe55j9GrjgmwMqUXnuvUWeWNr3tIBCNh179te4/IaJD8f4nDtLOwYsWh+9/+ZA19+NV6aelJEmtwPSDC8fNaYSy9hcXpyftuodeWrBB5vpMRDv9DKA2fr/lenld/7p03Tbo/fr1xF+0Sig9LuFDquP7LEVYceV4ez7lit0xW3Aa0BFdR5d4jgm/w+mC8pMHuQ8fk4XBhHVPKCEdqfOaxhfTq+8tNiqdQPnlNscktUTBZUWULEiufvIYczxu8csRA+g8RsEUJL3twVqxNx0rIP4Syy0FAfiGiInJYfCU8d+t3j99FvDg2t4WXRGDp2yfWYo00ld/WlhXP03nnhKVqsMX8QJX/GqGELflyvZyTx0FQ5giF7N2l30olnNs+QswZZIvWbSJACN/Xv4klDjhCJytvfM+f/ekiGUH0o+UmjyC2oKmlDFQdndnyvSgVlkjmwfUO++MTbS5jd1wOQvLKe1/K4DXrvt8r1oALkEtI8MsDiPsR8BAKVLO9u/X75S9oRT5zg+UXbd6i+6WVhDO88Mrrms+3dgESIAACIAAChhBgi/UTD/9E1sUvOD5+39Ii3d5YbkgDe2olH+w193yB5RwjPnHL/Hu180tf/S8tjUT3EuB5byVl5fKHs7W5RRw4gqMft148Wd9qDhQSLixdHb1M4YW6vUXkZV7jrKO8+vLtke6obl7Tja1i4SJMvbXQ9uwWyWu4WTvH7WPrJIe91y9Abq3dHMWSrXxclgrIYS2fPY/xIuVBQjHy1S0KzuWzZS00OEib16jqVAu9h/G5VteoPJ3Z2qq39bVs/SsXwXRsLUjeOj/2L5/ALQ/9t1bI0iVvaWkt0cF4ruXrQsJwyxuvMM8/GFh4yz8YICAAAiAAAsYTUGMx18xjM8Q1CPCPdfXelaP46QMSuEYP3LOVrIxER5isadZ62BkFg90jOyNqPbPO5LV3no7qZuW0PQVVzWez1S5Wcjqj6LDFSVkvbZVl7+M8f8+asAulNWEl3poiby1ve8ds1dv6Gh7Hobi1puK4fR5/lRj5EsXwpzVHPVLS0BJxSe1jCwIgAAIgYBwB/RjMkU8hrkFAP++3qdl6lEPX6AlaCQIgAAKuS0A//urHZUf3yHDlzVe3aGRdba2j+4fyQQAEQAAEbBDQj8G+Yl0giGsQ0FtJbYWod42eoJUgAAIg4LoE9OOvflx2dI8MV94CdZEAa2qq
Hd0/lA8CIAACIGCDgH4M1o/NNrLjsJMQ0FtJ1QLFTtI0NAMEQAAEegyBBt26hvpx2dEADFfeQkPN/sKdXWjY0RBQPgiAAAj0RAL6MVg/NvdEFq7UZz8/84LLHBABAgIgAAIgYDwBDqyjxEjvFcOVt6jICNVPKiq8oKWRAAEQAAEQMJaAfgzWj83GtgK1dZVAYIC/dkl1jfnHg3YQCRAAARAAAYcTqBZrDCox0nvFcOUtMSFe9ZPOnD6lpZEAARAAARAwloB+DNaPzca2ArV1lUBIiNmDhcOuQ0AABEAABIwncFEsy6DESO8Vw5W3Aelpqp908rhpoUftABIgAAIgAAKGEdCPwfqx2bAGoKJLIhAVYfZgOVdUckll4CIQAAEQAIHLI3C+qFQrwEjvFcOVt/T+/cSaNKZqc3POUGkpHjzanUcCBEAABAwiwGMvj8EsPCbz2AxxDQIJCXFaQ0/nntPSSIAACIAACBhH4PjpfK0yI71XDFfeAvz9aejggVpnN61fq6WRAAEQAAEQMIbApu/WaRXxmMxjM8Q1CPTrm6I19ODR01oaCRAAARAAAeMIZJ7I0Soz0nvFcOWNezlj2mStsxvWfUulJbC+aUCQAAEQAAEHE7hYWko89iqZMdU8Jqtj2DovgYEZ6ZoHy6ncAioqLXfexqJlIAACIOCGBIpKy+hkToHsmfReSetrWC+7RXkbPXI49U1Nlp2sE2E2l7z3tmEdRkUgAAIg0NMJLHnvLVILdKemJNPoUcN7OhKX6n9QYKCFB8uq77a7VPvRWBAAARBwdQJfb9ipdUF6r+jWsdZOOCjRLcqbh4cH/fiehcRbliOHDtBXyz51UBdRLAiAAAiAgCLAY+3hgwfkrhyL716gjcUqD7bOT0BvLV313Q4qhvXN+W8aWggCIOAWBHi8XbHe/NJs+tRJhvarW5Q37mFGen+6+aZZWme/Wb0SCpxGAwkQAAEQsD8BVtx4rFUyd/ZMYhc8iOsRYGup8mDhhbpfef8r1+sEWgwCIAACLkjg1cXLicddltSUPsQehUZKtylv3Mk7bptLY8eM1PrLPypee+kF4vkYEBAAARAAAfsQ4HnFr/79BQvFjcfe+fNutk8FKMVwAmw1ve9uswfLnkPHaPEX5iA0hjcIFYIACIBADyDA4+zug9mypybvlYXaHGSjut+tyhtP8Hvi0QctFDh25/n9f/2Gvvp8KZYRMOpbgHpAAATckkBpSbH0aPiDGFPZPV3JlWNG0ZOPPWj4A0fVj619CAwc0J/mzjZ7sHy2ejMUOPugRSkgAAIg0IYAK248zirpLu8Vb9WA7tr6+vrQU48/Qks+/ow++2KFbAZPpP9m1Qr5SUzqQ6n90qhPcgpFRfei4JAQ8vcPID9/P/Ly8hYfL/kDRM2f665+oF4QAAEQMIpAU1MTNTc3U2Njo/g0UG1NLdXUVFNFeTkVFV6gvNwcys7KlFt9m3icZHd19npQ623qzyPtegTmz5tLufn5tH3Hbtl4/mFxSqz99vCiGykiLNj1OoQWgwAIgICTEeA5buyazh4OSsaOHkF3dJP3SrcrbwyBf0QsvONWGjn8CnrjnQ/o5CnzujW8iKxaSFYBwxYEQAAEQKBrBHh+FAeK4vnGEPchoDxYXnjpVdq+c4/sGLv0PPrsyzRr4liadu0YigoPcZ8OoycgAAIgYBABXoaFo/lyUCg1x42r5mkH7DnoJfSX7hCnUN5Ux3ni/P/+v/+iXbv30ao1a+nAoSPEb5ghIAACIAACXSfAP+yvGDKIOBIWT6iGh0LXGbrCFezB8osnHqUlHy2lZV+aAtLwD42lwgrHn+SEGBqSnkwpSbEUExVBYaFBYlF2X/L38yVv4b3i5eUpf4Tg++EKdxttBAEQuFwC0nNF6BeNjU3UIDxYeLysrqmji2WVdK6ohE4L74WDR08LLwbTOm6qPh4j2VWdPR6603vFqZQ3hsMwxghTJH+qa2roaPZxyjp6jHLz8qmouITK
ysqpqrqaeH24hvoGahTwlQuRgostCBhBgP/z48eOEaRRR2sC/L3jsZLdxr29vcjPz48CxRozISHBFBUZQQnxcdSvb4qMJMlrgkHcnwC/AV40/zYaOeIKelN6sJzROn067xzxBwICIAACIHBpBNh7hYNE8Vzj7hanU970QAL8/WnY0MHyoz+ONAh0N4EjmUfp6ef+SH949jcItd7dNwP1g4ALESg4d55KSi867AfAoIwBwoPlWdq5ey+tXrMOHiwu9N1AU0EABJyLAL8k5QW4eV1NXp7FWV7YO7Xy5ly3EK0BATOBjz79XO7w9ndP/9J8AikQAAEQaIdAYVExPfv756U767xb5zhEieMfHGNHj5SfyqoqyszMpmMnTlJe/lnpwVJeXiE9WGpFcDB4sLRzs3AKBEDALQlY917xF94rIS7hvQLlzS2/luiUIwmw1Y3nY7Lwlvex0LEjiaNsEHA/AvsPHib+8JxERylxTI3dZkeNHCY/7kcRPXJlAvBgceW7h7Z3J4HuCZPSnT1G3SBwmQSU1U0V03pfHccWBEAABDoiwArc07/7H3ru//2ZjmSZFn7t6BqcBwF3IKCenWrrDn1CH0DACAKwvBlBGXW4DQG91U11CtY3RQJbdySwd/F+2a1bFt/rjt1zmj7pLXEPh4ykXtFRTtM2NAQE7E1A/yzFM9TedFGeuxOA5c3d7zD6Z1cCtt4Q2jpu18pRGAiAgNsTYCVu7fqNdFi4Y0NAwF0JtH5mtt53136jXyBgDwJQ3uxBEWX0CAL6N4WtO6zeHLY+jn0QAAEQ6AoBjmw2eeI1NEisewoBAXckYO1ZimeoO95p9MlRBOA26SiyKNftCHT0ZpDPI/Kk2932Ht+h4QuvkAyeueGJHs/CHgAOHs6U0SZbl8VK2+0cfZKVtg/2tj6NfRBwGwK2nqV4hrrNLUZHHEwAypuDAaN49yBg7U1h656pN4eIPNmaDPZBAARsEbBQ2mxlwnEQcBMC7T1L8Qx1k5uMbjicAJQ3hyNGBe5AICIinJ575ldaV3idJiX645wPAgIgAAIdEYDS1hEhnHdHArasbqqvsL4pEtiCgG0CUN5ss8EZENAIxMb0Jv5YkyGDMqwdxjEQAAEQaEMgOiqS/vDsb7A2ZBsyOODuBNqzuqm+w/qmSGALArYJQHmzzQZnQAAEQAAEQMCuBNp7EWTXilAYCDgZAXiwONkNQXNcloBTK2/VNTV0NPs4ZR09Rrl5+VRUXEJlZeXEx2tra6mhvoEam5qoSXyam5td9iag4a5N4Jb5WP/Kte+g67Xe08ODPDw9ycvLi7y9vcjPz48CAwIoJCSY2LITHxdLaf1SaeCAdAoMDHC9DqLFdiNQWVVFbPE4fuIU5eWflc/R8vIKqqquNj1HGxqpsbGRmvlZiueo3bijoK4R0E9F6NqVyA0CXSfQ3jM0KjKCEuLjKK1vKmVk9KegwMCuV+DgK5xOeWMlbOfuvbR6zTri9W5YMYOAAAiAAAiYCcgf2eIHN//orqsjqqqqppKSUnOGlhQrd+zWO2PqZBo9ajh5CKUP4v4E5HN0115atWYtsRsanqPuf8/RQxAAgc4T6Owz1FO8JOX5ydOnTqLRI4cT7zuDOJXylnk0m954ezGdOHnaGdigDSAAAiDg0gRYudt34JD8pKYk04/vXoC5Vi59Rztu/JGsbHrzHTxHOyaFHCAAAiDQPgF+8WV+hvYRz9CFTvEMdQrljeF8+Mky+uyLFW3cH1OTYimjbxL1S46n3lHhFBYaRAH+vuTv6yvdhby8PMnTQ3w88Ua5/a8gzoIACLgLgaamZuHi1iQsb03UINzeaoT5rbqmjkrLKuhC0UU6mXOWDh49RafzzmtdPnnqND3z33+iubNn0fx5c53mDaLWQCQuiwA/R5d8/Bkt+3Jlm+doSmIsDUlPpuTEGIqJijA/R/3Ec1RYZ/EcvSz0uBgEQMDFCLT3DD1fVEqncgrEM/S0eIae03p28tSZlmfo
TLpj3s3k1Y1WuG5X3urq6umFl1+j7Tt2a4D8fH1o5sSxNOO6sRQVHqIdRwIEQAAEQIDkyyox403+8ObxMijQX2JJjI2W24lXDZPbotJyWr1hB61cv51qauvkj/rPvlhOufn59ORjD5KPjw9wugEBa89Rf6GYzbhuDJ6jbnB/0QUQAAH7EmCDT3vPULrKVB8/Q78Wz9AVFs/QFTIOxxOPPUS+4vnbHdKtzpv8prC14jZicBq99NwjtGjOZChu3fGNQJ0gAAJuQ4Bffi28aRK99LtHaKQYW5Xwy7IXXvon5kIpIC685aBdL7z0qsULUPkcFfccz1EXvrFoOgiAQLcT4GfoAvUMHdJfa8/2nXuk/sJ6THdItypv7OKht7jNnTaenn50gVDaQruDBeoEARAAAbckECkeQL8VYyuPsUq27dgl3dXVPrauSWDJR0uJf0gouXn6BPkc5XsOAQEQAAEQuHwC8hn6yHzi8VUJ6y9LPl6mdg3ddpvyxqGL2TdfCf+o4LeEEBAAARAAAccQ4DF27lSzAsfzjDlQFMQ1CfBz9POvVmmN5x8WbGmFgAAIgAAI2J8Aj6/6l6DLvlxBHCTKaOkW5Y3NjG+884E2qZrdeaC4GX3rUR8IgEBPJLBo7mQaPqif7DqHlOcIv1gn0/W+CabnqPnejRQuPVDcXO8+osUgAAKuRYD1FXZNZ+FnJ0f3NfoZ2i3K267d+4gjn7HwpOqHFt0o0/gDAiAAAiDgeAIP33kjcaATFl6ahdfWhLgWAb5nHP2MhZ+jD+M56lo3EK0FARBwWQIPLbpBjrvcAfkMFetqGindorzxwqFKOKokfPMVDWxBAARAwPEEeF4xj71KVq9Zp5LYuggB/T2bJe5lRFiwi7QczQQBEAAB1ybAz1CO5qtEr9eoY47cGq68VVdX04FDR7Q+Tb/W3HntIBIgAAIgAAIOJTBDN/buP3iYKquqHFofCrcfAb5X+ufoNN29tF8tKAkEQAAEQMAWAV7OTAmPx9U1NWrX4VvDlbejx05o4al5AW6s4+bwe4wKQAAEQKANgaiIUEpOiJHHef5UZqbxk67bNAoHOkWA75UKUc33EM/RTmFDJhAAARCwGwEed1mPYeHx+Gj2cbuV3VFBhitvWUePaW3K6JukpZEAARAAARAwlsCQ9GStwmMnTmppJJybgP5e6e+hc7carQMBEAAB9yKg12P0+o2je2m48pabl6/1qV9yvJZGAgRAAARAwFgCKS1vDbnWvPyzxlaO2i6ZgP5e6e/hJReIC0EABEAABLpMQK/H6PWbLhfUxQsMV96Kiku0JvaOCtfSSIAACIAACBhLICYqQqtQPzZrB5FwSgL6e6W/h07ZWDQKBEAABNyUgF6P0Y/Lju6u4cpbWVm51qewkEAtjQQIgAAIgICxBMJCg7QKy8srtDQSzk1Af6/099C5W43WgQAIgIB7EdCPv3r9xtG9NFx5qxLRJpUEBPipJLYgAAIgAAIGEwjw99Vq1I/N2kEknJKA/l7p76FTNhaNAgEQAAE3JaAff/XjsqO7a7jyVldXp/WJFxaFgAAIgAAIdA8Bf1/zGFxbW9s9jUCtXSagv1d4jnYZHy4AARAAAbsQ0D9D9fqNXQpvpxDDlbeG+gatOd5eXloaCRAAARAAAWMJeHubx+CGhkZjK0dtl0wAz9FLRocLQQAEQMBuBCyeoTr9xm4V2CjIcOWtUayFoMTLy/DqVdXYggAIgECPJ+DlaR6DGxuhvLnKFwLPUVe5U2gnCICAOxPQ6zH6cdnRfTY/uR1dU0v5amFR3tX/cDCoelQDAiAAAiDQQsBTp7w1616sAZBzE8Bz1LnvD1oHAiDQMwh4epjVKP247Ojem2t1dE0t5Tc3N2s1eXh4aGkkQOBSCGzccYC+27qvzaWncgvoq7VbaeueI1RTa55n2Sajix3gNzsFF4pdrNVorrMS8PQ0j8FNurHZWduLdpkI4DmKb4K9COAZai+SKKcnEtA/Q/Xj
sqNZeDu6ApQPArYI1An/4N0Hs2nn/qN0IucsJSfE0IC+iTRm2ACKCg+1dZnF8U9WbKS6unq6btww7fifX/+Etu4+Qr4+PsT/sd7888+1c9YSb37yNWWfzKNp146m6668wloWpzm2dNUm+mj5Bvqfp+4VrJKcpl1oCAiAAAiAgLEE8AztOm88Q7vODFc4HwEob853T3pMi15+5wvasusQxfaKoKS43nQg6xRt3H6ABqcnd1p5aw2rsrqGduzLotFXpP//9s4EPKoqy+MHslT2kBASSICELCyyaLMpi7SiKOLGIoJb02hri35q6zc9ozM66LQzfs7Y46czNtOfbas9CuoA4oKyKSiLzdoo+yZbEsgeslc2555bea9uVaUqVaFeVT3qf7+vePfdd9+79/1uuKdO3XPPod8+OJfafmoni1Di3KXaugZa881Oefnzr7Z3qry9tOQDGjkkh26ZepXDY55++S26eeqVdPW4EQ7l/jr59z9+REPzBtBt10/QHzl6eAFV19RTVkaaXoYMCIAACIBA+BGADPU85pChnvngqnkJQHkz79iZuueNwpRx256DlNM/g37/T7/W36W86gKlpSTr55zhukXnyigxIY44mr0nc9uDR09TW1s7ZWemU1VNHSXExzo8y/lk8879sv6t110lzSxPF5XIFUCtXmlFNe3Zd4z69+1DZZUXKEp4SO2VnEDHTxXTsVNFVFldK8tZQUxSgs6zEll0rpxSUxJd3qeiqobi4mIoVoTKqLpQS3UNjVJ51drkI3PY+cMRykhLkc9nj0YpSQnULyOVZt04iSxOYTbYU+C5sgr5Llmir1GKF0E2teR+9kqMp6ioSGF2WSVXJJklEgiAAAiAgPkIQIZChprvrxY99hcBKG/+Ionn+EQgUjhKsERHEccoYjthTSFTFTcuXy7MBD9a/Q21t9v2ShYMyqKnHpgjlTjnBleu3UoffLZRFq9Ys4X48+D8m2j6z8c5V9XPN/51L/VLT6Xbb5hIn3+9Xe6fWzDnBnn9wNFT9G9vLCPeC7Rq3Vb54fZnTptIr/55pazzl5XriT/jRg2hpxfNI1ai/rJyA63euF1v4/JhufSbhbN15e7xF/5AU8aPpEqhuLHJKKfMjN50z+1T6aqfDaNDx8/Qi/+9VL7zpxu+I/7kDuxL//HMQ8Srg8xDNZvctvsg/XHpaqkE8rNiBNcH5t1EUydewafEyuKiZ1+nu2+/lrjuqcISWZ6fk0kPzptBfEQCARAAARAwDwHIUMhQ8/y1oqf+JhBwhyX+fgE8z5wEeAVovNjbdvjEWakg8UqWc9qy64BQxjYJs8SR9NriRfTE/bPo9NkSWvLeZ85V5TmbLy6YPU3mb582gV75x4do4ujhndblwjPFpfTjmfM0acxwuao1vCBbmm3yyh0n3oO34A6bIjfj2vHyeY8vmEnD8gbSnTdPkXXm3fJzWb5wrq3e6o07pOLG5pT/9fyj9Ou7ZtD3h36kd1ask/W1f9Zt3k1WsaL49MPz6NH7bpMrcCvWbJaXB/TrQwvn3ijzN04ZK5//5P1ztFsdjmeKy+jVt1ZQfJyFHr77FvrN/bMpQ5ihvvG/n0olUK289JONYoWvDy1+4j66d+ZUuXq45lubyahaD3kQAAEQAIHQJgAZChka2n+h6J2RBLDyZiRdPNsjgYeEYsOKEu9723PguFy9unfWdcJE0baf6wuhCEVFRdD9QpFh80c2Xdy266A0J2xotFJcrMXh+X1Sk6XiwoWpvRJp0IC+DtedTzZ+Z/NSOWmsbc/apLHD5QrW3oMnaMzIAtlmvz6p8raUZMfn9ekwOUwTbartfLlpByUJ885fzLqe2NSRV9Q2bNtL24XXS/rlTL0LvDr2zCPzxepjtCzbvf+o8Ix5WJpL8uojrwZyShEmmurzZaHyz1qhfPHK4C+E0sqrdpzSUpLo2d+/I1YS/0rD8gfqtVkZZeWO06ihg+gr0a+d3x91WPnUKyMDAiAAAiAQ
0gQgQyFDQ/oPFJ0zjACUN8PQ4sFdEYiNsdBTv5pDc26aLEwSt8lVLzYZfPnpB4QTk1QqLhV7uFrb6bHn39Af1WRtkXnei8b75bxJ7HCkrr5RVmUvltwm7wP7dscP0sRw+95DxJ/aOludjSL0ACtvvib2/MX74jho44PPvKrf3mi1UktLm3h+g9y3xxfSxV42TXHjc35fTjW1DS575OQFN/8Ul9rCBvBqoJbyc7IoOjKSis6Xa0XyODCzj8M576c7J+5n5S8CYTsc2OAEBEAABEKdAGSo7cdPHifI0FD/a0X//EkAyps/aeJZ3SLAK0JPLJxFrFy8t+prYicic2dMoZ5CoYgVq2sPzZ/h8lxeZfM2xQklUQueGBNjm+x5dY29NnJi00w1saMQVva6cnai3sN5jpjFe/d45ey+mdc5X5b7+7RC3u/nj6StPlpbbEotP7O9vU162YwQzlXUxPsLkUAABEAABC4tApCh3R9PyNDus8OdwSMA5S147MO6ZVamegqnJWoaKJQ4Ti1iBYvTQOExcr9wGsJ7uHIH9pNl3fmHFUPnpAX2fn3xI9Kjo3ad96K99dEa2iJMOaeL/WaRYm8eJ44lp6aoiI7yjr7yNd6DwOaOvJo1JK+/MN30Llad+lwtz14tOTU321ho5c7HguwsGdOOnaukT7A5KDl8olCaow4QZqZIIAACIAAClx4ByFDPYwoZ6pkPrpqbAJQ3c4+faXu/X7j0f1c48RgzokAqT2wGyStuHFSbPTdyuuOmq6Xy9p/CIce0yWMoV+xhKymvEq7uK+lesaesu6m+oYl2iFhw7Dkyq2N/nfasyWL/GzsXYeWOlbf87EyKFat1m3fu00MITBg9jEaIWHS8Mrhhyx7p7CRSKFtsaskrhq+9/TH9q/BSOe3qMcR75opLKqihyUpzpk/WmunymCfajRfhBHg/IL83pwljLnO5j520rBZeMt9Zvl4qjWxGw05TeAWQPWgigQAIgAAIXHoEIEM9jylkqGc+uGpuAlDezD1+pu19aq8EShZxx9YL5adG7AXjlCWce/ydCKzNShWnkcKpxt8/NJfeFa732R0/J3YCMm3yaJnv7j9bdu2XLv2v7nBUoj6HY7VdMSyPdu8/RoVizxg7T5l/yzW0av139Mqb/ye9NbLyxjHnZgtlbO3mXcSBQNkBCCtvHAKAVw6XCVPMN5d9IR/NJpIcm82XxPvmuN2PRfiDV/60XLLpTHnrLZyTPPf4vaKt1fTJ+m3UKhzA8F42dobiydGJL31BXRAAARAAgdAiABnqeTwgQz3zwVVzE+ghYmnZAmj58T1+97ndWcNztzzp8OQ5dy3Uz1cs+Wc9j0z4Eqi+UEcWsbrFQavdJQ563SRc6ycnxEsFzl09o8rZRKVK9JMDdEco5p7sLbO6tk6uvjmbgbKDkta2NuF9Ml46MelO3/R2RYBuFkaeEu97axFmlr7u1fP0TFy79AnMWfQv+kuuWPa2nueMp7ncoSJO/Etg6V778+62mUPbC4ggR1UayEOGuv8bgAx1zwZX/EPAkwyVLXQxn3enFwFfeWNTM/Zux4kDL7OZHFJ4E2CFqKsUHxtD/AlWYsWMV7mcEytU7MGys8Srcxeb3LXb2XMtUVHEHyQQ8JYAz8FaYlNbJHMQ4LHSfneFHDXHmBnZS8hQ93QhQ92zwZWLJ6DKUNZvApU8/5RvQC96KKsW7T+1G9ACHgkCIAACIOANAXUOdl459uZ+1AkOAXWs1DEMTm/QKgiAAAiEJwFe2dWSqt9oZUYdA668qe7L2eQMCQRAAARAIDgE1DlYNQcOTm/QqrcE1LFSx9Db+1EPBEAABEDg4glwzGAtqfqNVmbUMeDKGzuc0BLvB0ICARAAARAIDoHWVvscrIXFCE5P0KovBNSxghz1hRzqggAIgID/CDjIUEW/8V8LnT8p4MqbxWLRe8IOKJBAAARAAASCQ6Cp2T4HR0e7dxgUnN6hVXcE1LGCHHVHCeUgAAIg
YCwBVYaq+o2xrRIFXHmLi43V36mxyf7FQS9EBgRAAARAICAE1DlYnZsD0jga6TaB+DjI0W7Dw40gAAIg4CcCwZKhAVfeEhPtngUv1NT7CR8eAwIgAAIg4CsBdQ5OSkr09XbUDxKBxET7WKljGKTuoFkQAAEQCEsC1TV1+nur+o1eaFDGEOXNk7PM3qkp+quUVFTpeWRAAARAAAQCS6C0olpvUJ2b9cKOjKc53bkuzo0noI4V5KjxvNECCIAACHRGoKzigl6szst6oUEZQ5Q3ta9aLBqtLCuzn5al04Uleh4ZEAABEACBwBI4cbpYb7B/Vqae54zz3O1wESeBI9ARF1VtEHJUpYE8CIAACASHwMmz5/SG1XlZL1Tnbz/GgTNEeVNj0Dh/AcjLzdHfad+RU3oeGRAAARAAgcASOPzjWb3BIYPz9Txn1LlbndMdKuHEGAI9lbVOexx1vS1Vju4/elovRwYEQAAEQCBwBNT5Nz93kGvD6vytTOuuFX0rMUZ562F/rHMA0WFDB5P2ReB0UQlVVNX41mPUBgEQAAEQuGgCFdW1dPLsefkcnpMHF+Q5PFOdu3sqc7pDJZwYQ0D9hVYV/h2tqXL0VOF54rFEAgEQAAEQCBwBnndZj+HEMnTo0ALXxtX5O9SVt4ge9lhubT/Z4wjxW8XHxdGoEZfpL/jlNzv1PDIgAAIgAAKBIbBGmXtHDh9GsTExDg23trfq5+qcrhciYxwBdeVNCQKrNchylMdMS19u2qFlcQQBEAABEAgAgbVOMpTnZZfUZg/iLTQ8l8vdLfDfk5QeREVG6mfNrS16XstMv2GqlqUvNu4Qvxpi9U0HggwIgAAIGEygUvxiyHOvlm664Totqx9b2uzKmzqn6xWQMY5ApCKaWxThr7SojtmXm3YSjykSCIAACICA8QR4vl2tyFBVr3FovVWZv9V53aGS7yeKhPD9Znd3WCLtgbitrVaXamNHX0G5g7JlubW5hZa897lLHRSAAAiAAAgYQ2DJe5+RFtx5UE42jR1zhUtD1hb73K3O6S4VUeB/AlGKaG5xtF7RGuMx0+Qoj+UfxJgigQAIgAAIGE9gyfufKzJ0ILFe02lS5+9ou1Vip3V9KFQkhA93dVE1LtoeQLTe2uhSu4ew53/gl/cQHzn97cBxem/VVy71UAACIAACIOBfAjzX7hFzLic5Fy+4W5+L1Zbqm+1ztzqnq3WQN4iAxW69Qk32FVC1NR67+xc4ytH3P/larYI8CIAACICAnwnwPLtn/zH5VJsMvUf35eHSlDp/h7rylhgTr/e/usEeA0EvFJmhgwto9u0360Ufr90KBU6ngQwIgAAI+J8AK24812pp1m0ziJ1fdJbUuTspNqGzKigzikBctP3J9fYVUHuhLTdsSAHNus0uR1eu2UJQ4Jwp4RwEQAAE/EOA51eeZ7XkSYbKOvXNWlUidV63l3Yrp/y81637O70pLaG3Xl5SU6bnnTPz586is0XFtGPnHnmJv1Rw7LdF995Kqb0SnavjHARAAARAoBsEeF8xm6ezlYOWxo8bTXfdOVs7dTmeu2CPw5mWkOpyHQUGEkiybz2gqiaPDd115ywqLLbLUf5icUrI0UeEHE1JhtLtER4uggAIgIAXBHiPG5umO8jQsT+j+R5kqHxsld2ChdR53Ys2PVWJeF4kTxW6c625tZn2Fx+Rt7a1t9GY7FGdPoaXG8ePGS0UuCIqKrYFujtXVknrt+yRtqRZfdMoLkYRYp0+BYUgAAIgAAKdEeBQLCvXbqHX315FhefL9SpXjhtDTz32MEUqzqX0ix2ZjYe3Up21QZ5NLriSUuKSnavg3CgCrWKf2+lq29Pbha/pgjS3LelytLBQyFFb6IdzpZW0bvNusoq9cJmQo27Z4QIIgAAIeCLA4QD4B7HX3xEy9Jx9MYp//HzysUXUpTOvvUK30UwnL0snSvSPTtNDBGJVoxB4egevr/FG91fWLaF28Wje1fbE9Q9SYoz7XwDb
hSvkZR+tpJWfrHZpY9CAvjQ0dwDlZWdSeu9elJwYR7GxFoqxRFNkRARFRPSkCOF+kwUYEgiAAAiEA4F28YWe47C1CTfErW1t8seuxqZmulBTTyUVVdKCYd+RU3oMGo0Jz5Nsrs5WD1q8Te2aeqxtqqPXNrxJLBx6int+e+MjFB2pmPKplZH3PwHe5L58n4iU3vHomcOFyU2Ux3baWI5+uII+/vQLl3rZWRk0YnA25Qh5mtE7hZKT4kVoiGjIURdSKAABEAgHAqz68JzpSYZyAG6Oo6kmlqFsqs4WD55kqLynQXjbX3XAdjurKHeMJIryj9MSQ5Q37unS7SvpRNlp2enJ+ePp2qGTZN7TP4cOH6W33l1KJ0/Z7vNUF9dAAARAAAS8J8CeCdlRFO837irxqtuW47ZQAnl9sunuK92bV3b1LFzvJoGNJ4jOdbj/H55BdHk/rx508PAR+rOUo2e8qo9KIAACIAACXRNgGcpOonivsVfpe7HqdqBj+0E/sRXs2jyvbvOmkmHK29HzJ+jDXZ/KPkRHRNGjUxdSgiW+yz7xKtzuPd/Tl+u+on0HDhGfI4EACIAACPhOgH8ZHDXiMuIYNOzK2BsLhTprPb3x9dvU3GaL0Tlv3G00OMN/Qsf3twjTOwqFs69vT9penuMD3SqCcsd6Xn3TSLHc3LVnL61Z9zXkqAYFRxAAARDwkQDL0JHDhxHH1eTwLN7IUNlEo5Cfnx0i0uK8TRlE1N9/Ww8MU96483/a/D6du1Aq3yM/PYfuGj9L5r39p7GpiY4eO0FHjh6nQuHYpKKyimpqaqmhsZGam5uptaVVLnuyoDLA+tPbbqJemBJgiyZeCUcCgUATYAHCQiVCmI5HRkaQxWKhuNhYSkxMoN6pKZSV2Y/ycnOkJ8n4uDifuvfBjlV0rNSmNPRLTqdfXX2PT/ejsh8JrBF7xys7NrxnJhFdk+vzw+sbGujw4WN0/MeTcm85y9Ha2jopR61WK+Soz0Rxgz8JQI76kyae5S2BzmVojJChiRctQ2UfNv1IVFxj606qkMHTO/fq7G1/nesZqrwVVhXTO1s/1M32vTWfdO4kzkEg1Aiwie+zL7xELy5+xq2r9VDrM/oDAl0RUM0l+YeJBRPvpAGpWV3dhutGESivJ1p3zP50H8wn7TchBwKhSQByNDTHBb26SAKquSQ/apows+zTteWhL60aEqRb60D/lEyaJPa7aYn3UPCXAyQQMDuBD5evkq/w0YpPzP4q6D8ISAKq4sYFPHdDcQvyH0eaEPissGmJ90/wFwMkELgECECOXgKDiFdwJOCsuPH87WfFjRs0VHnjBthRydC++ZyViRU4NsupaxK/KCKBgAkJ8K+FvB+T0w/7DxKfI4GAWQnwHrdlOz7WHZTwe/Ccfc2QiWZ9pUur3+yoRN0rwQocm+TwngokEDApAchRkw4cut05AZ6PeV7WHJRwrQFij9uovp3Xv8hSQ+K8OfeJvwiU1VZQeV2lvFRZX017Tv9ALWJDPAd/tcAFtTMynIcwgTf+5y0qLbPHzCqvqKRrpkwK4R6jayDgSoDDAWw9toNW/W0NlXXMzVyL5+vZo2d07QbZ9ZEoMYrAgF5EF0Sw7hqrrYVacTxeYdsMnxTjN/fTRnUfzwUBZwKQo85EcG5KAhwOgBW2787Y5mjtJfgHt8k5HGtHK/Hr0dA9b2pP2aEIm+VsO7FT3wPH1/m10pPSKKf3AMpI6kO9RCDYeEusUOgsIq5QFEX0iBDv3hNfJFSYyAeNgGaj79wB7H1zJoLzYBJgJ04yDtxPbdTc2kLWVivVWxupuuECldSU0emKQnlkZwFa4rmYTSW9Ceui3YNjAAkIGSpNJg/anIA5tMwb4tPEh4/xIh5fTKRNoYsSxjXCsY0UtAZ9iXDoB05AwAsCkKNeQEKV4BMQ8VSlwiLiqUqvkRx/kwNu1zcLR1INRGXCgrCqw6GU2tsA7E0OmPKmvdfZyiJae2CT7oVSK8cRBMxA
4PiGE1RX4mrym9g3gfKuyzXDK6CPIOBCgL1KTh9xLfE+ZaQQJ8BfGHYX2b48hHhX0T0Q6IzA89tW0D7xA5JzGpU2gBZPQExJZy44NwmB1Fiisf3FD2n+dU7S2dsHXHnTOnG05ATtOvU9nSw/I34hVn//1WrgCAKhRaCutJ6OrxeBc92k/Gl5lJBu/H9aN82jGAR8ItBThBsYlDaQxmZfToP7Io6bT/BCoTLHgTsqzLdLRCBviNBQGBH0wQsChyqK6Nlty93WfHHiHTSsd5bb67gAAiFFgE1W+ooA3AVpjnuTDe5k0JQ37b2sLVY6K0IKFFefp4p6EcetsU7sw26iJvFpaRVx3ITZD5v/aMG6IaM0cjgGmoC7VTetH1h900jgGCwCmnU9x4Bjc3M2O4+KjJRm6HHRsZQUK+LAxacSr7QNFCEALFGWYHUV7fqLAJvyVAgTnnLx4X1xbNLTLMq4nAPEStMfITn5iAQCQSbgbtVN6xZW3zQSOAadAJuaix855b61SGF+HhVBZBGfOGGaniz2GrOpem/x4fIAJ2EYH9zEXx7y0wfJT3B7gtZBwD0BaaP//kvuK4grtefr6I78mxH3zSMlXAQBEPArAf7iwL/88gcJBEKYgPQw+ZmruaTa5R/Kz9Kh0XGQoyoU5EHAiUDQV96c+oNTEAhJAudLSom9Smpp8e9e1rL0wnP/oOfTeqdS34x0/RwZEAABEAABEAABIshR/BWAgH8IBH3lzT+vgaeAgLEEWCFzp5SNuGyosY3j6SAAAiAAAiBgcgKQoyYfQHQ/ZAgII04kEAABEAABEAABEAABEAABEACBUCcA5S3URwj9AwEQAAEQAAEQAAEQAAEQAAFBAMob/gxAAARAAARAAARAAARAAARAwAQEoLyZYJDQRRAAARAAARAAARAAARAAARCA8oa/ARAAARAAARAAARAAARAAARAwAQEobyYYJHQRBEAABEAABEAABEAABEAABKC84W8ABEAABEAABEAABEAABEAABExAAMqbCQYJXQQBEAABEAABEAABEAABEAABKG/4GwABEAABEAABEAABEAABEAABExCA8maCQUIXQQAEQAAEQAAEQAAEQAAEQADKG/4GQAAEQAAEQAAEQAAEQAAEQMAEBHr8JJIJ+okuggAIgAAIgAAIgAAIgAAIgEBYE8DKW1gPP14eBEAABEAABEAABEAABEDALASgvJllpNBPEAABEAABEAABEAABEACBsCYA5S2shx8vDwIgAAIgAAIgAAIgAAIgYBYCUN7MMlLoJwiAAAiAAAiAAAiAAAiAQFgTgPIW1sOPlwcBEAABEAABEAABEAABEDALAShvZhkp9BMEQAAEQAAEQAAEQAAEQCCsCUB5C+vhx8uDAAiAAAiAAAiAAAiAAAiYhQCUN7OMFPoJAiAAAiAAAiAAAiAAAiAQ1gSgvIX18OPlQQAEQAAEQAAEQAAEQAAEzEIAyptZRgr9BAEQAAEQAAEQAAEQAAEQCGsCUN7Cevjx8iAAAiAAAiAAAiAAAiAAAmYhAOXNLCOFfoIACIAACIAACIAACIAACIQ1AShvYT38eHkQAAEQAAEQAAEQAAEQAAGzEIDyZpaRQj9BAARAAARAAARAAARAAATCmgCUt7Aefrw8CIAACIAACIAACIAACICAWQj8P13QZpfKgqvLAAAAAElFTkSuQmCC">
<center>Figure2: Different types of layers in encoder and decoder component of a <a href="https://jalammar.github.io/illustrated-transformer/">transformer</a></center>
To get a more detailed explanation on **different forms of attention** visit [this](https://towardsdatascience.com/attention-and-its-different-forms-7fc3674d14dc) page. Also there is a great blog post on [Visualizing attention in machine translation model](https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/) that can help in understanding the attention mechanism in a better way.
An **“annotated”** version of the paper [[3]](#References) is also available, in the form of a line-by-line implementation of the transformer architecture.
# Prerequisites
- Inferencing workflows for the pretrained text models of the `arcgis.learn.text` submodule are based on the [Hugging Face Transformers](https://huggingface.co/transformers/v3.0.2/index.html) library.
- Refer to the section [Install deep learning dependencies of arcgis.learn module](https://developers.arcgis.com/python/guide/install-and-set-up/#Install-deep-learning-dependencies) for a detailed explanation of the deep learning dependencies.
- **Choosing a pretrained model**: Depending on the task and the language of the input text, the user might need to choose an appropriate transformer backbone to generate the desired inference. This [link](https://huggingface.co/models) lists all the pretrained models offered by the [Hugging Face Transformers](https://huggingface.co/transformers/v3.0.2/index.html) library.
# Inference only models
The `arcgis.learn.text` submodule offers the following models pretrained on unstructured text:
- **ZeroShotClassifier**
- **QuestionAnswering**
- **TextSummarizer**
- **TextTranslator**
- **TextGenerator**
- **FillMask**
These models can be imported using the command below:
```
from arcgis.learn.text import ZeroShotClassifier, QuestionAnswering, TextSummarizer, \
TextTranslator, TextGenerator, FillMask
```
## ZeroShotClassifier
[Zero-shot learning](https://towardsdatascience.com/applications-of-zero-shot-learning-f65bb232963f) is a specific area of machine learning where we want the model to classify data based on very few or even no training examples. In **zero-shot learning**, the classes covered in the training data and the classes we wish to classify are completely different.
The **ZeroShotClassifier** model of `arcgis.learn.text` submodule **classifies an input sequence from a list of candidate labels**. The transformer model is trained on the task of **Natural Language Inference (NLI)**, which takes in two sequences and determines whether they contradict each other, entail each other, or neither.
By default, the model assumes that only one of the candidate labels is true, and returns a list of scores — one per label — that add up to 1. Visit [this link](https://huggingface.co/models?search=nli) to learn more about the available models for the **zero-shot-classification** task. To get a list of supported transformer backbones for this model, use the command below.
```
print(ZeroShotClassifier.supported_backbones)
```
The command below creates a model object by calling the `ZeroShotClassifier` class.
```
classifier = ZeroShotClassifier()
```
Sample code for performing a **single-label classification** task:
```
sequence = "Who are you voting for in 2020?"
candidate_labels = ["politics", "public health", "economics"]
classifier.predict(sequence, candidate_labels)
```
For **multi-label classification**, we simply need to pass `multi_class=True` in the `predict()` method of the model. The resulting per label scores for multi-label classification are independent probabilities and fall in the (0, 1) range.
```
from pprint import pprint

# Multi-label classification: with multi_class=True each label gets an
# independent probability in the (0, 1) range, so scores need not sum to 1.
texts = [
    "TAKE THIS MAP DOWN! YOU DO NOT OWN THIS MAP PROJECT OR DATA!",
    "This imagery was great but is not available now",
]
toxicity_labels = ["toxic", "severe_toxic", "threat", "insult", "identity_hate"]
results = classifier.predict(texts, toxicity_labels, multi_class=True)
pprint(results)
```
The **ZeroShotClassifier** model has been fine-tuned on [XNLI](https://cims.nyu.edu/~sbowman/xnli/) corpus which includes 15 languages: Arabic, Bulgarian, Chinese, English, French, German, Greek, Hindi, Russian, Spanish, Swahili, Thai, Turkish, Urdu, and Vietnamese. So this model can be used to classify **multi-lingual** text as well.
The example below shows how this model can be used to classify an input sequence written in the Spanish language.
```
# Classification on spanish data
sequence = "¿A quién vas a votar en 2020?" # translation: "Who are you voting for in 2020?"
candidate_labels = ["Europa", "salud pública", "política"] # ["Europe", "public health", "politics"]
classifier.predict(sequence, candidate_labels)
```
This model can be used with any combination of languages. For example, we can classify a Russian sentence with English candidate labels:
```
# Cross-lingual inference: Russian input scored against English labels.
# Translation of the sentence: "Who are you voting for in 2020?"
text_ru = "За кого вы голосуете в 2020 году?"
labels_en = ["economics", "public health", "politics"]
classifier.predict(text_ru, labels_en)
```
## QuestionAnswering
**QuestionAnswering** model can be used to extract the answers for an input question from a given context. The model has been fine-tuned on a question answering task like [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/). SQuAD belongs to a subdivision of **question-answering** system known as [extractive question-answering](https://medium.com/deepset-ai/going-beyond-squad-part-1-question-answering-in-different-languages-8eac6cf56f21#:~:text=SQuAD%20belongs%20to%20a%20subdivision,referred%20to%20as%20reading%20comprehension.&text=When%20an%20extractive%20QA%20system,the%20question%20(see%20diagram).), also referred to as reading comprehension. Its training data is formed from triples of question, passage and answer. When an **extractive question-answering** system is presented a question and a passage, it is tasked with returning the string sequence from the passage which answers the question.
Visit [this](https://huggingface.co/models?filter=question-answering) link to learn more about the available models for **question-answering** task. To get a list of supported transformer backbones for this model use the below command.
```
# List the transformer backbones that QuestionAnswering supports.
qa_backbones = QuestionAnswering.supported_backbones
print(qa_backbones)
```
Use the below command to instantiate a model object.
```
# Create a QuestionAnswering model object with its default backbone.
model = QuestionAnswering()
```
A sample code to **extract answers** from a given context for a list of questions.
```
# Extractive QA: for each question, pull the answering span out of the passage.
questions = ["What is PointCNN?", "How is Point cloud dataset collected?", "What is Lidar?"]
passage = r"""
The arcgis.learn module includes PointCNN model to efficiently classify and segment points from a point cloud dataset.
Point cloud datasets are typically collected using Lidar sensors ( light detection and ranging ) – an optical
remote-sensing technique that uses laser light to densely sample the surface of the earth, producing highly
accurate x, y, and z measurements. These Lidar sensor produced points, once post-processed and spatially
organized are referred to as a 'Point cloud' and are typically collected using terrestrial (both mobile or static)
and airborne Lidar.
"""
model.get_answer(questions, context=passage)
```
## TextSummarizer
Text summarization [[4]](#References) refers to a technique of shortening long pieces of text. The intent is to create a coherent and concise sequence of text keeping only the main points outlined in the input sentence or paragraph. It's a common problem in **Natural Language Processing (NLP)** domain. Machine learning models are usually trained on documents to distill the useful information before outputting the required summarized texts.
The **TextSummarizer** model can be used to generate a summary of a given text. These models have been fine-tuned on the **summarization** task. Visit [this link](https://huggingface.co/models?filter=summarization) to learn more about the available models for the **summarization** task. To get a list of supported transformer backbones for this model, use the command below.
```
# List the transformer backbones that TextSummarizer supports.
summarizer_backbones = TextSummarizer.supported_backbones
print(summarizer_backbones)
```
Sample code to instantiate the model object and summarize a given text.
```
# Create a TextSummarizer model object and condense a long passage.
summarizer = TextSummarizer()

# Passage describing a building-footprint extraction model.
passage = """
This deep learning model is used to extract building footprints from high resolution (30-50 cm) satellite imagery.
Building footprint layers are useful in preparing base maps and analysis workflows for urban planning and development,
insurance, taxation, change detection, infrastructure planning and a variety of other applications.
Digitizing building footprints from imagery is a time consuming task and is commonly done by digitizing features
manually. Deep learning models have a high capacity to learn these complex workflow semantics and can produce
superior results. Use this deep learning model to automate this process and reduce the time and effort required
for acquiring building footprints.
"""
# Cap the generated summary at 100 tokens.
summarizer.summarize(passage, max_length=100)
```
## TextTranslator
Machine translation is a sub-field of computational linguistics that deals with the problem of translating an input text or speech from one language to another. The **TextTranslator** model is a class of inference-only models that are fine-tuned on a translation task. Visit [this](https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/) link to get a more detailed explanation of how machine translation models work. These models use a technique called **Attention**, which greatly improves the quality of machine translation systems. **Attention** allows the model to focus on the relevant parts of the input sequence as needed.
<img src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAeAB4AAD/4QCCRXhpZgAATU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAABJADAAIAAAAUAAAAUJAEAAIAAAAUAAAAZJKRAAIAAAADMzEAAJKSAAIAAAADMzEAAAAAAAAyMDIwOjEyOjAzIDE1OjM1OjIwADIwMjA6MTI6MDMgMTU6MzU6MjAAAAD/4QGgaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49J++7vycgaWQ9J1c1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCc/Pg0KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyI+PHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj48cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0idXVpZDpmYWY1YmRkNS1iYTNkLTExZGEtYWQzMS1kMzNkNzUxODJmMWIiIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyI+PHhtcDpDcmVhdGVEYXRlPjIwMjAtMTItMDNUMTU6MzU6MjAuMzEwPC94bXA6Q3JlYXRlRGF0ZT48L3JkZjpEZXNjcmlwdGlvbj48L3JkZjpSREY+PC94OnhtcG1ldGE+DQo8P3hwYWNrZXQgZW5kPSd3Jz8+/9sAQwAGBAUGBQQGBgUGBwcGCAoQCgoJCQoUDg8MEBcUGBgXFBYWGh0lHxobIxwWFiAsICMmJykqKRkfLTAtKDAlKCko/9sAQwEHBwcKCAoTCgoTKBoWGigoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgo/8AAEQgB8QH2AwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/aAAwDAQACEQMRAD8A+qaKKa2SwAJH0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99
v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU3af77fpRtP99v0oAdRTdp/vt+lG0/32/SgB1FN2n++36UbT/fb9KAHUU1M5YE5wadQAUUUUAFNb/WL9DTqa3+sX6GgB1FFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUE4oqK6hS4t5IZQTHIpRgDjIIweaAKthqdvf3V9BbsxezlEMuRjDbQ3Hrwwpmh6va6zDcS2TMyQXEtq+5cYeNirD6ZB5rxK88IeGLbXtVstC8Ha9rJgmAuJotReONJCo+QEyAscYzXrfw/to7LwxbW0GiSaHFGXC2cjh2XLEliwJySSTnNAHSUUUUAFFFFABRRRQAUUUUAFFFFABRRmkzQAtFAooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAav3n+v9KdTV+8/1/pTqACiiigAprf6xfoadTW/1i/Q0AOooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDyHXGOnfEa6tdK8bHTb3VpI86emnrOqybMAsx+6SB3IzXoHgueSfRv3+rrq80U0sMlwIBD8yuVKlR0IIxXn3i7QvE+neJXvNE0i21a0n1OLUtxukgkjZU2lG34yvGQQePSu3+HujXejaNcf2m0R1C+vJr64WE5SN5HLbFPcAYGe9AHUUUUUAFFFFABRRRQAV538TNTuodd0HS/+EgHh7Tr3zjNeL5YdnUDbGC+QoOTzjtivRK86+LOrwWP2W2Xw9Y63feRPdqt7gRxRRAFznaTk5AAAoA0fAtnFbX1wYvGlz4hJjAMMs8Mgj5+9iMA+1dpXmPw2vLmPxNPYah4a0LRpJbBLyCXTnLGeNjg5+RcbTjIPqK9OoA434hPfWU2h6taWl1e2un3TSXdta8yNGyMoYLn5tpIOKzNH1mfxZ400y+0ux1W00qwt51uJb23e3Ervs2oqNgnG0nOKteOls9JsHl1LxJr9kLu6zClid8pbb/qo0VGJXjOMfjWb8P7q2m1/bDqvjW6byWPl6xbPHB1HOTGvzenPrQB6aOlFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFADV+8/wBf6U6mr95/r/SnUAFFFFABTW/1i/Q06mt/rF+hoAdRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFBopDQB4brC+GZ/iB4ki8TaFqHiOeOSMxyW9s9wlumxf3RA4U9+Oua9Q8AxaVF4eiXQdKn0qyDtttpoDCynPJ2nnk1xGrnXNE8eX8GieJvDNodXlWWLTr2F3lL7AC3ysDzt+nFd/wCD7i/udIDateafeXayujy2ClYsq2CuCScggg89RQBt0UUUAFFFFABRRRQAVwvxF1DS9OvbGS40u51XVmguEgtoGCkwlR5pYkhQuMde+K7qvOvi/b2sNvZar/b0Gi6jCstvDJNCZ1mSRfnj8tfmPQHI5GKAIPh1YaLpviKH7K2qTX15pEdzbve3BlENsW/1SemCRn145r0yvKvgzb211M+oTeIrfWdRtbOPT0igt2txawL0BR/nyxGSx649q9VoA8/+KhvxdeGBoa266w2oEW1xck+TF+7bfvA5OVyABg5rW8Or4vXUM+IJtBey2HiyjlWTdxj7xIx1rM+LsOk3Oj2Nrq+m32qzzXSizs7KYxSSSgE53ZGABkkk8VhfDu2sdL8WJa3mia1o2qS27vbi81N7uGdARuCncQGGR1H0oA9aooHSigAooooAKKKKACkJA6kClrifil4Yg1zw7e3SwXM+p2lrK1okNzJHl8ZAwrDPPrQB2u4eo/Olr5un07wX/wAI/py+HNT1m78ULJAY7U3dwZXk3LuWVCc
KOua+j4siNNwwcDIoAdRRRQAVFdXEVtC0txKkUajJd2CgfialqpqljaajZS22o2sN3ayL88MyB0Ye4PBoAxNN8a6NfaKdRF9axIPMPlvOgb5GI9e+M1r6DqSaxo1lqMUbxx3USzKj9QCMjNeKxwaNJYaJaeF/BnhV9Wvmurgi9tldIIYZCCWIG7cSQB2B9hXsXhHU01nw1p2oxQiBLiBXES9E45A9hQBr0UUUAFFFFABRRRQAVzvjbxbp3g/T7e81YyeVPcJbKI13HLd/oBkk10VcDr+hy+J/HUkWoQONGsdMkijLrhZZ58qxB77UUf8AfdAHeowdQynIIyDS1zPw5bUB4SsrbWYpI7+zDWshcY8zyyVDj2IAP4101ADV+8/1/pTqav3n+v8ASnUAFFFFABTW/wBYv0NOprf6xfoaAHUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFZJ8SaMNcGj/wBpWx1QqW+zB8uABk5HbigDWpDUNjeW1/ax3NlNHPbyDKSRnKsPY1MaAPGfFmmazpHjGS9t/DVzq8U2qQ36Xdo0ZkCLHtMTBmBGD07YPau7+G2l32m6LdSatELe9v724v3tlcOIPMcsEyOCQMZxxkmvMfGmoWOk/EiO+8SXF1b3cOqwtaPJ5nkC08vnYB8ud2c984r0f4VtczeH7u6uFnS3u9QurmzScEOsDyEpkHkZ5IHoRQB2dFFFABRRRQAUUZozQAV598RbTWofE3h/W9A0aLVnslmjliluUhCK4X5gW/i4/nXoNeZfFyO2bVtBPiGK8l8Kjzfti24cqJcDyzIE5Kfe9s4oAzbC58aP46k8QSeCoUR7QWQWHUoiSu/cXc9yOw+vrXrynI54PpXlXwwj00eMtRfwXDdxeFjZr5m8OsDXO7jyg/8As/exx0r1agDh/iqunwaPaalf66NCuLG48y1vdnmYcggps6uGBIIHNcx8PdZ03xH4xiubzxaNc1W3t5FtoIdMktIolJXe3zA5Y4XqfpXVfEjTtUuBouoaHaWl3e6bdmbZdTCOPYUKtkkHnB49Kn8K61r+p3uNR0fT7az2HM9tfrOd3YYA+v5UAdcOlIxx16Utcp8SNOudV0azsoEme2mvoFvEhYqzQb/nGRzj19s0AdMtxEzYWRGPYBgTUteA+CfDNpo/jLR7ez8O3tvrVlfXQu7toW+ztas0hjYOflJwYwMcjBr36gAorO1/V7XQ9Nkvr9mWBGVTtXcSWYKAB9SKwL3x9pdp4qGhPFePKJI4ZblISYIZZBlI2bsxGPzFAHYVkeL9Tm0bwtq2pW0PnTWttJMkZ/iKqSBWuKzvEepwaNoOoaldo0kFrA8zooyWCgkigDyKDXdS0nR9fvZNdg1C+22M9rMsMahmmIzGgAyQegySa9siJaNSwwSORXjXh/wtdR6vba5Y/DzQbN5Ssys19l4gechQu0Ng9q9mX7ozxQAtFFFABSNS0jdKAPIrmx8R6JfazqOlfD7RXNx5gkki1PbJPGSSeNnBPUgHrXofgq4guvCekzWkEVvA9shSGJtyxjH3QT1x0rx/WPEmiyTap9p8V+ORaQzSRXKwWTiOPkgoHCcDt1r1L4e61oOo6LHZ+HHdYLBEiME0bRyxDHy7lYA8gZz3oA2PEOsWmgaNd6pqLlLW1jMjlV3H6AdyemK5/wAO+PLXVNTj03UNN1LRdQmTzbeHUI1T7QncoysVJHdc7h3FaXjvSINd8J6jp93ci0hljybhsYiKncGOewIFcL4k8c/DzxHaro2o+ILdrlMPHdxhlMMo6SK+MAg++KAPQ/C+tw+ItGj1G2ikiikeRAsmNwKOUPQnuta1c58PNFXw/wCELDT0vv7QVA7i6wB5u9y27jj+KujoAKKKKAIrqNpbeSOORonZSBIoyVPqM+lePrpHiBvHF1oknxG1W3jgs47lRJFb+ZMXZwduUxtXbz15PbjPsteZeP7eTxN4kfRLTwzouqyWMEdxJca
q5VU8wsAqbVLfwEntQBf+Fdzfm48R6fqOtz679gvFjjvZERQQUDFBtAGVJwfwrvq5rwFp9/pekG01DTNH01I2/cwaWW8sL3JyBzmuloAav3n+v9KdTV+8/wBf6U6gAooooAKa3+sX6GnU1v8AWL9DQA6iiigAooooAKKKKACiiigAooooAKKKKACiiigBGGRXiUtjr/hnxTE8Hha61TZqF3eLe2rx/v1mUhBIWIKlSdv0HFewa5qtnommy3+pzeRaxEb32lsZIA4AJ6kV4TpuqeDNbv8AVrnxZrurNfG8k8oxzXMcPk5/d7AgA+7jPfOaAPX/AIdaPdaF4PsLHUPKF4N8sqRHKI8js5VT6Ddj8K6U1i+Dxpw8OWI0SWWbTtp8mSV2ZiMnqW5POetbRoA80uL/AMXaN4o1pLbwrNrelTzLNbTG/ijKHYoZQrHgZHtXb+G76+1HTEn1TTH0u6JINu8yykAHg7l45ryHXrfwrJ468SN4412/0icyJ9liOoS20TxeWvzx4IDHOcgdKn+E9pf6bqWiy2dxq82m6j9tEq3jyOGijk/cTfNyjMvbv1xQB7XSZx1pRXjXji50F/iJeWvjTVNThtFtYnsY7WSZIlPO/d5f8ROMZ7UAeyAg+lKa5L4br4fXR5/+EVubm5s/PO9p5JHYPgcZk5xjHtXW9qAPK/ix4i1yzubyy0TU00n7HpraiZTCsj3BDY8td3AA7nBPIrq9E1ia78ba7pwnSe0tYLaRdgH7mRg25CR1OArf8CrnPidHZ+INWg8OjwvHrt4lubpmluRbrBGTt+/1yTngDtzW38ONKn0ewntZfD1nokW8Mot7r7QZiepZsA5+uaAOxrhfHqeI7XxDomqeGbBdRWBZorm2kvBAjKwXB56sCOPxruq4X4mjwqVsf+EtF4RlvI+zxzt6Zz5QPt1oA0/Cer+INQupo9c8PRaVCqbkkS+Wfec9MADH1rp68h+F1tpSeP8AUZ/Cdvqf9iNYKJZrxJ0QT7/up5oBPy8nAxx7169QBwHxctftFlpLX1ld3+hRXe/Ura1Us7x7DtJUcsobBKjqKwPA8Gjy+Ora68CaRdabpKW0q6izWsltDMx2+WAjgZYfNyB0rrPiFqs+iTaHqLm6/siC7Jvzbxs5VCjBWYKM7Q2M1n6b4qh8T+OtNHhe6lutKtbaY30qRsINzFfLXcQAXyG6dBnNAHoVcd8Vb24sfDCPDeTWFvJdQxXd5B9+CBmAdgcHHHftXYjpXJfEy71Gx8PRXWk2d3fyRXUTS2drGJHni3fOmD2xQB5/4C8RrcN4f0rS9bm1G5TWb4Sg3BmY2avKFaQ+n+rwT+Fe21554N1+wfVI7Ww8C63ojXGfMuJdMSCMHGfmZTXodAHPeP7LTr7wnqMWsXbWVkI/Ma5UgGEqdyuM55BA4ryzQNZ8LXJtNHOu6te6nfaxHdz3U+lyR/apk24T7gVAAidOgFemfEqzt7/wheQ3l9BYQq0chuLhtsa7XDDcfQ4x+NclHoWo6xrJbRdS0W68OS6nHqhnhlLzxsAu6NcZXkjrkcEjFAHqi1l+K0t5PDOqpfSSxWrW0glkhTc6rtOSowckDpwa1FrG8XWkOqeH9S0qW9S0e8tpIxIWAKgqQWx6DvQB4hpOs+G9NtdP+z+O/iB9iBVYFeyYxyAdFB8jkY44Ne7+H9ZsNe0yK+0m4FxauSA4BByDggg8gg9jXmE//CQ65pdh4f1CXwtaWcUkPm3trf73dY2BHlxbRtJ2jvxnvXofhXRH0YapvkR/tl9JdqEUgIrBQB9fl59yaAN6imyOqIWchVAyST0FcKPir4bdj9m/ta7izgTW2l3Esb+6uEwR7igDvKRxlSMkZ7imQSCaJJF3BXUMAwwcH2p7dDxmgDx5ZvEWi2N54dsLjwpcWLvMsd9dX+yREkZifMiwdzDce/OOcV2Hg3woNH1MahHfJcxNpltYLsX7/l7jvJzzndx6AVxPiJ/DV42oWrf
DDXrm4lZ081NKCLI5J+YSEjAJ53fjXpngi0u7HwlpNrqKJHdw2yJKiYwpA6ccUAQfELRbjxD4N1TS7N40nuIsJ5hOxiCDtbH8Jxg+xrjtNs9a8ReJNJk1zwxYaHZ6bFIjj7UkzXG5dvlqFHEffn0HFdd8Sjqa+B9XbQ/O+3iE7PIGZMZG7Z/tbc4968c0/wAK+CfEni3SE8L2+sSOI5DqNxKbiMwjb8pLvjEu7sPegD2b4e6Nc6B4TtdNvDH5kLy7VjYsqIZGKKD7KQPwro65n4bSajJ4Osv7YMzXcbSRF512u6rIyozD1KgH8a6agAooooAK8g+KC6LF4wiupb3xVDqSwQwzHRpljjhieQqjSEjHLE+p46V6/XmPxD8J6treqXsnhbVNOinnit0vrW7Ut/q5C8bArypPI5GCKAOi8CRWtjNq+mQanq+oXFnMizNqUwkZdyBl2kAfKQfzzXWVyfw/0iexj1K+1LULW/1a/n3XUloMRIUG1Y1BJPygd+ck11lADV+8/wBf6U6mr95/r/SnUAFFFFABTW/1i/Q06mt/rF+hoAdRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBx/xcv7jTPAGpXlm8yTxNCVMJ+c/vUyB7kcfjWXH8QJDGpPgrxQ3A5+wg/wBa6zxJZ6brelT6bqFwiwyMpcLKFYFWDD9QKvR3loFAFzBxx/rBQBDod6dS0y3uzaXFl5oJ+z3CbJE5Iww7GtCq/wBstP8An5g/7+Cj7ba/8/MH/fwUAeT+Ltf8S3/igWek3WmafYrqQ0xXuLQTuXMXmbjkgAZwAB9a7D4X6pqOqeHpv7auobrUrO8ns55IYgiFo3I+UA9MYrR1bR/Dmr2l3bajb2M0V0yvN8wUuy8BiwIORjg5zVnRLXRtD02HT9JFpa2cWdkaOMDJyTknJJPJJ5NAGtXnWvTa1r/jS+0XS9bi0SCwgikZlgSWa4LgnI3cBRjH1rvfttr/AM/MH/fwVz3ifwv4U8USRSa5bWtzLENqSicxuB6bkYHHtnFAEHw61O/u4tXsNVube+uNLvPsv2yCMIs4KK+So4DDfg47iuvYEoQDgkdR2rJ0Oz0TQtPjsdHWztLRMlY42AGT1J55Puav/bbX/n5g/wC/goA8iuPDIk8beS/xD1+PUoLUyCbdbKBGXwUPyc8jODXX+AybXV9X0x/EWo67LbLC7S3RiKx7w2FUoo54yc+1NuvAfga61ebUrnSdKluplw7OFIY5znHrnvW9oen+H9Bt3h0W306xidtzrbhEDH1OOpoA2TXk1542urXQdWlfUbdb2DX/ALGiyFQywecq4x6bSea9S+22v/PzB/38Fc1feD/BF/eS3d9omg3FzMxeSWWCNmdj3JI5NAFez157v4qPplpfRz6aujrcGONlYCXzipOR3xiu1rndC0Lwt4fmkm0PT9I0+WRdrvbRpGWHXBI61tfbbX/n5g/7+CgDzXxJ4wutNtPiB/p8Edzppj+xRyFcrmJW4B68k1tTa6JPiB4f07T72CSzuLO6mnjhZWBZfK2k46feNX9W8LeDdXvnvdV0jRLy7kxvmnhjd2xwMk80/RfDfhHQ7z7Xo2l6NY3W0p5tvFHG209RkduBQB03avM/iP411rQNYisLWzisNOljDNrd1C80MbE/d2oOvuxAr0T7ba/8/MH/AH8FI91ZupV57dlPBBcEGgDyv4W239ueJNa1LUvElz4gfTbqOOzmWbZb7XgRyVjU7ersM8nivXa5GLwp4VtvEMWtWMNtZaghO57WXyllz/fRTtb6kZrpvttr/wA/MH/fwUAUfFR1EaDenRLa0utQ8s+TDdkiJz6NjtXgXhm00S2+I2lSeKZJ9I8RyTr5FppkEUVtI+eAxiJLD/er37WYdK1jTprHUZYpLWYbZEWcpkfVSDVTRdF8M6GoGk2Wl2mP4olQMfct1J9zQBviuZ8fRabZ+H9S1q+0y3vpbKymwsiglkKncmewI4N
b/wBttf8An5g/7+Co7mawureWC5ltpYZVKOjupDKeCCPSgDxPW4NB0vVDLpXgnw5NaaXb211qE2wB42lbhYsDBIHzc9iK92jIMakdCMiuUsfCPhCw0efS7O0tIrCeVZpY1uG+dlIK5bdkgbRxnGBjGK6X7ZaAY+0wY6f6wUAN1Dy5LWWCRkBmRkVWbG7IPFeY+Grjxromi2elq3hKaG0jEMTm7dWKDhQccZAwK9FvYdIvpYJbwWc8luxaJpGUlCRgkenBxXP/APCDeAy2ToWhZ658pKAOut2ZoUMm3eVBbacjOOce1YvjnWNQ0Lw3dahpGlS6teRbdtrGcFgTgn14HPFasVzZRRqkc9uqKAqqHAAA7U77Zaf8/MH/AH8FAHgsniG/8W61oVpe+L0ijvbw291pGmo1rJEvls3zs37zqoGeBXvOm2cOn2MFpagiCFAiBmLHA9zyawvEvh7wv4lh2aza2Fw4+7LuCyofVXBDA/Q1qaWbHT9Pgs4LtWigQRqZJg7YHTJJyaAIPGGux+GvDeoavPby3MdpEZWiixuYD0zxXJw+OteeNZIvh7reyQbgRPBznv8Af9K7i7k068tpLe7ktZoJVKPHIysrqeoIPUU+O6so0VEnt1RRgKHAAFAHPfC7Wb3X/BVlqOqDbeSvMHUgArtlZQpxxwAB+FdXVO3l0+2hEVtJaxRgkhEZVAycnge9SfbbX/n5g/7+CgCxmobW7t7tC9rNHMisULRsGAYdRx3ppvbQ9bmD/v4Kraemk6dE8enixto3cyMsO1AzHqxx3PrQBo15f8SNSPgrxRD4mGTbX9lJp0ygdZ1y9v8AmS6/iK9I+22v/PzB/wB/BVa+XStQiWK++xXMausirKVcBlOQwB7g9DQBnfD/AEZtB8JafZTNuutnm3Dd2lc7nJ/4ETXRVX+22v8Az8wf9/BUsU0cozFIjgcZVgaAFX7z/X+lOpq/ef6/0p1ABRRRQAU1v9Yv0NOprf6xfoaAHUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBQtNYsbvVr3TbecPe2ao08eD8gfJX88GmXGqWj3l5pscw+3QWwuHjAOVRtwU56dVP5VxfhN0j+LvjkMyr+5ssZOM/I9ddNpNmmpX+sxhjeXFotu7bsqUQsVwPq5oA+RtF0m01KxN3fCWa5lllLyNK+Sd7e9SXXh2xW7sxHBL5bO3mYlfGNpxnn1o8K3FwNMjRbN2j82QeZvUD/WNzjrXR0AY3/CNaX3gf/v6/wDjXmPiSL7Nrd3DBJMkaPhVErcfrXs1eO+Lf+Rivv8ArpQBjSvNhNs9x94Z/et0/On7pP8AnvP/AN/W/wAaSigDI1G7uo7kql1cAY6ea3+NVPt95/z+XP8A39apdV/4/G+lUqALP2+8/wCfu5/7+t/jTW1C9/5/Ln/v63+NQUxutAFn+0L3/n8uf+/rf40f2he/8/lz/wB/W/xqrRQBZOo3v/P5c/8Af1v8aP7Rvf8An8uf+/rf41VNJQBb/tG9/wCfy5/7+t/jR/aN7/z+XP8A39b/ABqpRQBb/tG9/wCfy5/7+t/jR/aN7/z+XP8A39b/ABqpRQBcXUb0tzeXP/f1v8al+3Xn/P3c/wDf1v8AGqCfeFTUAWBf3n/P5c/9/W/xpft15/z+XP8A39b/ABqtRQBZ+3Xn/P5c/wDf1v8AGj7feZP+l3P/AH9b/Gq1LQBZW+vNwzd3PX/nq3+NbgeTb/r5+n/PVv8AGuaX76/WukXoKADfL5rfv7jbgY/et/jTt8n/AD3n/wC/rf40lFAGVLeXQkYC6uMZ/wCerf40z7bd7uLq5xj/AJ6t/jUc3+tf60ygCx9suv8An6uf+/rf41E19ebv+Pu5/wC/rf40wVE/3jQBMb+84/0u5/7+t/jS/brz/n8uf+/rf41WooAsfb7z/n7uf+/rf40G/vO13c/9/W/xqsaKALP2+8/5+7n/AL+t/jSG/vP+fy5/7+t/jVekNAFj7fe
f8/lz/wB/W/xpft95/wA/dz/39b/Gq1FAFuO/vCebu5/7+t/jT2vbvHF3c/8Af1v8apx9T9KkoAsfbbvH/H3c/wDf1v8AGvpv9i26uZ7rxNHNcSyRqkTBXcsAcn1r5cP0zX05+xMf9O8Uf9c4f5mgD6qX7z/X+lOpq/ef6/0p1ABRRRQAU1v9Yv0NOprf6xfoaAHUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBy2teAPDGt6lJqGqaRDcXsihXlLMGYDoDgjpWzDY2+maJ9isohDbQQlI4wSQqgcDmtCs6bUbWea/sYp0a7tog8sQ6oGB2k/XBoA+SfCs8K6LGGljBEkvBYD/lo1a/2mD/ntF/32K+cdVlkXVr4LI4H2iTof9o1W8+X/nq//fRoA+l/tMH/AD2i/wC+xXlHii1uJtevHigldGfIZUJB/GvPvPl/56P/AN9Gvuf4L28Mnww8PtJFG7G3BLMoJPJoA+RfsN3/AM+s/wD37NH2G7/59Z/+/Zr7v+yW3/PCL/vgUfZLb/n3i/74FAH52a1G8V8yyoyNgcMMGs/NeqftOIkfxUu1jVVX7PGcAY7GvJ6AJM01qbRQAUUlFACmkoooAKKKWgBKKdRQAJ94VPioovvirFADMUU+kk+4aAGZozUFFAFhfvr9a6NWGByPzrlAeasZPrQB0m4eo/OjcPUfnXN5PrRk+tAFiYjzW5HWm5HqKoknJoyaAL2R6ioXI3VXyaWgCbNGahooAlzRmoc0ZoAmzSGos0UASZozUdFAE8fU1JkVUHWl/GgC1kV9OfsTH/TvFH/XOH+Zr5Z/GvqP9iH/AI/PE/8A1zi/maAPq1fvP9f6U6mr95/r/SnUAFFFFABTW/1i/Q06mt/rF+hoAdRRRQAUUUUAFFFFABXk3xA1rUW1bWF8P3mvvPpkKmZbQQrb277N4D7+WJBBIz0+tes1yGt/D7RdY1W6vrlr+NrxVS7ht7uSKK5CjaPMVSAeOPoOaAOVsfEOseLr7wlp0OoyaTHfaM2q3ctuq+ZKQyKI0JBAGWJNaevNq9vrHhvwrba9eR/bPPmn1F1QzskYyI1+XAPI5x0FbN54A0S50rSrJFurb+y4/Ksrm2uHinhXABAcHJBA5BzUl54G0m80a00+6k1CQ2khlgvGvJPtMbnOWEud3fp07YoA5/xVJq2kXHhrw5aa5el9YvZEfUJQjSxxpGX2KduMnGASM1h654g17QLHxzpMWrTXdxplpbXVlezKpkj81iCj4GDjbxxnBru5vA2lXGhppl3LqNyI5vtEd1Ndu1xHJ/fWTOQfpxSWngLRbfRdQ01lurhNQKtdz3Fw0k0xUjG5yc8Y4FAHL6uNcsLvw3oY8RXzTa3OzXF6yoGjVI9xSIbcLk/XjNHgm1ubH4geOba81F9RaO1tNk0oAk2lHIDYABIz1x0xXceI/DOneILGC2v1mX7O4kt5oJWjlhccBlcHIOKpaJ4Q0vw0up3WnC5e6vYwLme4naV5SoOGYsTzz/kUAfnNq5/4m19/18Sf+hGqlW9W/wCQtff9fEn/AKEaq0AIelfeHwU/5Jb4e/69h/M18IYzX3d8E2H/AAq3w9yP+PYfzNAHcUUmR6ijI9RQB8WftP8A/JVrz/r3i/ka8kr1v9p7n4rXn/XvF/KvJcGgBKKXBoxigBKKKKACilooASlFFFAC0UUUAOj++KsVXi++Ks0AJSP9w06mv9w0AVaSlpKAFHWp6gFT0AFFFFAEB6mkpT1pKACnUlFAC0UUUAIaSlpKACiiigAooooAUdaWkFLQAV9R/sQ/8fnif/rnF/M18uV9R/sQ/wDH54n/AOucX8zQB9Wr95/r/SnU1fvP9f6U6gAooooAKa3+sX6GnU1v9Yv0NADqKKKACiiigAooooAKKKKACiig0AGaM1wnxO1zV9F/ss6e09tp8sji8vYLT7U8AC/L8noT3waq2fiqdpfBiWWr2+rW+qX89vPdJAE3KlvK4XaPusG
QZoA9FqG8P+hzf7jfyrzPxd4z1bTR4++xtBnRrS3mtN6Zwzgk7vUcCuv8PwaumhzTa7qEd5PcReYEjgEaw5XlR3Ye5oA/OTVv+Qrff9fEn/oRqrirWrH/AIm16P8Ap4k/9CNVaADFdDp/ifXLO0jt7XVr2GGMYVElICj6Vz1WY/uCgDoP+Ew8R/8AQb1D/v8AtR/wmHiP/oN6h/3/AGrBooA9y8AW0Gu+HY73WYkvrtnZTNON7EDoMmuj/wCEe0f/AKBtp/36FYPwl/5E+H/rq/8AOuyoAy/+Ee0f/oG2n/foV458arG1sdasks7eKBWhJIjXGTmvd68R+O//ACHbD/rgf50AeY4oxRRQAqgelPwPSmrTqADApGApaRqAG4oxS0UAKg+apKjXrUlABSrywz60lKn3x9aALPlp/dH5UnlJ/dH5U+igBhjTB+UflVSrrdDVKgBKKKKAIyKMUtJQAYqxGo2jiq9WY/uCgBdo9KNo9KWigCrOMPxUVS3H+sqKgAooooAKKKKALenqGZtwB4q95af3R+VUtO+8/wBKv0AM8tP7o/KvpT9i0BdS8TgDA8uH+Zr5tYgda+kv2LTu1LxOR/zyh/maAPqZfvP9f6U6mr95/r/SnUAFFFFABTW/1i/Q06mt/rF+hoAdRRRQAUUUUAFFFFABRRRQAUUUUAYviLRrrVfJNnreoaU8ecm0EZDg/wB4OjfpisA/DjTY9EtbO1vtQt7u2vX1GPUEdfPFw4Id+V28hiCNuMV3NFAHBL8NdPNj4ggu9R1O7l1yOOO8nmdN52DAK4UAfTGPauzljEWmvGpJVIioJ9hVhXVs7SDg4OD0qO9/485v9xv5UAfmPqv/ACFb7/r4k/8AQjVWn605GsXwB4+0Sf8AoRqn5jetAFmrMf3BWb5jetOE8gH3qANKis3z5P71L9ok/vUAfRHwl/5E+H/rq/8AOuyr5e0vxdrWl2otrK9eKEHIUAVb/wCE/wDEn/QSk/IUAfS1eI/Hf/kO2H/XA/zrl/8AhP8AxJ/0EpPyFej/AAysoPG2m3V34mjF9cQyeXG78bVxnFAHiVFfUH/CvPDP/QMj/M0f8K88M/8AQMj/ADNAHzCtOr1X4y+G9K0K101tLtVgaV2DkHrgCvLMUANpGp+KkhRWf5hQBXorQ8iP+7R5Ef8AdoAoL1qStjRbOCfUI0kQMpzkV1P9iaf/AM+6/nQB59Sp98fWvQP7E0//AJ91/OquqaRZQ6dcSRQKrqhIOelAHKUVnefJ/epPPk/vUAaLdDVKo/Pk/vUzefWgCaiod5o3mgB9JTNx9aNx9aAH1Zj+4KqZNKJGAxmgC5RVTzW9aPNb1oAW4/1lRUrEscmkoAKKKKACiiigC5p33n+lX6x45GjPynGaf9ol/v0AatfSX7F//IU8Uf8AXKH+Zr5Y+0S/3q+oP2I3L3vicscny4v5mgD6sX7z/X+lOpq/ef6/0p1ABRRRQAU1v9Yv0NOprf6xfoaAHUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5VoOryaP4f8W3aXthZuviC5RZr4sY1y4GMLyT6AVo+BfGc/iC51zSb2WzubixgjmW5tY3jSRJAwGVfkEFD7VPe/D/zLOUWmpPDef2w2sQStEHVJGOdrLn5l/HNWND8J3el63rmvapq7ahfajaRwOohEUcQj3kBACSB83ck55zQB+dut/wDIZvv+u8n/AKEapVd1v/kM3/8A13k/9CNUqACiiigAooooAKKKKACvev2fP+Rf1D/ruP5V4LXvX7Pf/Iv6h/13H8qAPVqKKKAPJP2gf+PLSP8Aro/8hXite1ftA/8AHlpH/XR/5CvFaACpbf7/AOFRVLb/AOs/CgC1RRRQBo+H/wDkKRfjXZ1xnh//AJCkX412dABVLWf+QTd/9czV2qWtH/iU3f8A1zNAHl1FLSUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFfUv7EH
/AB+eJ/8ArnF/M18tV9S/sQf8fnif/rnF/M0AfVq/ef6/0p1NX7z/AF/pTqACiiigAprf6xfoadTW/wBYv0NADqKKKACiiigAooooAKKKKACikYkDjrXnf/CZeI4fGGn6Fd6JYNJckySm3vS7QQjrI42jA9OeTQB6LRXmLfEjUfsL68miI3hNLgwG7+0fviofyzKI8Y2Z9845q5qnjTW31zW9O8PaHDejSo4ppJprnyldXQtheCS3FAHoVQ3v/HnN/uN/KvP734kpLpvh9tGtYZL7Wbc3Ucd5ciCOGMAbi7fU4AA5rY8JeKD4k0vVkuII7e/0+Rre5SKYSx7igZWRx1UqwPr2oA/ObW/+Qzf/APXeT/0I1Sq7rf8AyGb7/rvJ/wChGqVABRRRQAUUUUAFFFFABXV+EfGuq+GbSa303yfLkfe29M84rlKkjoA9F/4W34j9bX/v3R/wtvxH62v/AH7rz2igDpPFnjLU/E8VumpeViAkpsXHWua3GhqbQA7ea1fDlul5fNHLnaEJ4rIrd8If8hJv+uZoA6H+xLX/AG/zo/sS1/2/zrUooAt+BfDVjeeJrWGbzNjZzhvavYP+FeaL/wBN/wDvuvO/hv8A8jdZ/wDAv5V7rQBx3/CvNF/6b/8AfdYnjXwLpNn4S1a5i87zIrZ3XL8ZAr0yue+IX/Ij65/16SfyoA+L6SlpKACnYptPoATFGKWigBtJS0lABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABX1L+xB/x+eJ/wDrnF/M18tV9S/sQf8AH54n/wCucX8zQB9Wr95/r/SnU1fvP9f6U6gAooooAKa3+sX6GnU1v9Yv0NADqKKKACiiigAooooAKKKKAGyBijBMBscE9M15x4L8L+LNDv7ifUbnQ7yS+n8y9u9kvnyL2VecAAcAdK9JooA8qb4fa8NGk8KpqWnDwo9yZA3kv9qWEyeZ5PXb143enaut0zw1JY614kvBMhi1SOGOJMHMflxsnP511FFAHlP/AArO7tdK8MvZzaXcapo1s1o6X1uZLe4jbkgjqpB5BrsPDemXunaNerqNvpFtLIWYR6ZAY4wNvfPLH34rpqhvf+POf/cb+VAH5fa3/wAhm/8A+u8n/oRqlV3W/wDkM3//AF3k/wDQjVKgAooooAKKKKACiiigAqSOo6epxQBJRTN/tRv9qAHNTa0dHsF1B5A7lNozwK1P+Edj/wCezflQBzVbvhD/AJCTf9czVj/hHY/+ezflXZ/CvwRBq3iCSCS6eMCEtkLnuKAIaK9f/wCFTWf/AEEpv++BR/wqaz/6CU3/AHwKAOH+G/8AyN1n/wAC/lXuteY+IPDcfw+0uXxFazvdy2uMQyLtDZOOo+tcX/wvnUP+gPb/APfw/wCFAH0FXPfEL/kR9c/69JP5V49/wvnUP+gPbf8Afw/4Vn6/8aL7VtFvdPfSoI1uYmiLiQkjI69KAPIKSlNJQAU+mUuaAHUU3NGaACkoooAKKKKACiiigAooooAKKKKACiiigAooooAK+pf2IP8Aj88T/wDXOL+Zr5ar6l/Yg/4/PE//AFzi/maAPq1fvP8AX+lOpq/ef6/0p1ABRRRQAU1v9Yv0NOprf6xfoaAHVmeI9Zs/D+jz6jqLMtvCBkIMsxJwFUdySQBWnXKfE7w3N4p8KS2FqyLdJLHcRCRiquyMGCsRyAcYyKAMXwd4n3a5qUPiBdTsNQuUa8hivlCRi3TgiMKxA25BbPPNa3h3xr/bs8D2uh6tHpk4ZotQljVImUDO7G7cAexIrmNI8JvqM1yl/wCEzpDSWM9qL19TNy6eYu1ginOAR3OOgrY8JDxLBp1p4f1nQoYraCD7NJqMN2pR1C4DLHjcCeOD0oALT4mafcTW0p07UotGurj7Nb6rJEBbyMThSOdwUngEjBpdV+JFrZX2sW8Gj6tejSJNt7JbxLsiXaG3ZJGeD0HPBrmLfwv4on8KaR4JutNtotOsJ4Q+rLcgiWG
Jw67Y8ZDnaAc8Dmups/Dd/Da+PEZI92rTSPa/MPmBt1QZ9PmBoAt6r46srWXTYNNs73V7y/t/tcMFmgLCHj52JIAHOOvWqs/xL0iPQLHVEt7+UXd6dPFskP76O4GQY2XPByuPxFYWmeHfEXhefw9qmn6bFqc0GiRaVeWn2gRsrIQwZGPBGcg03T/BGtLbaVc3aW4vpfET6zeRRyZSFHDDap/iIyvPfmgDWj+JiSz3lnH4b119VswHnsRCm9IyMiTO7aQecYOSQa0o/E1nqWpeE57K8uFttUimmjjVF2SqI93zk8jHt3qTT9Fu4PHXiLU5FUWl7Z20MLBuSyeZuyO33xXO+GPB2q2MPgRLuONf7JguY7va4O0uhC49eaALN38VNOt7d77+ydWk0f7QLWLUUiBhlcvs45zt3Z5IxxXWWmv211qusWEaSibS0jeYkDDB1LDH4LXjPifT/Efhv4XJ4ZuNLtzpun3MCDU/tAPmwi4UriPGQ/IBzxwTXb6hYeJdK8Wa7d6LpUGo2utW0CCR7oRfZpI1ZfmBBLKQwPHPFAGm3xC059H0a8srO+vLjV1Z7SyhQGV1X7xOTgAdyTWnoPiS38RaZqJjt7m0urRmgubW5ULJC+3cAcEgggggg8g15qvw91e10LwdcTWMWoXukWklrd2KXbQFw5zujkGOQR0PBrufBWl/2fpeqSvoI0WW4bc0bXZuXlwmAzNz9MZ6CgD87Nb/AOQzf/8AXeT/ANCNUqu63/yGb7/rvJ/6EapUAFFFFABRRRQAUUUUAFKKSlFAC0UUUAdB4R/1tx9BXS1zXhH/AFtx9BXS0AFeifA//kbJf+vdv5ivO69E+B//ACNkv/Xu38xQB7vRRRQBwHx0/wCSa6p/wD/0IV8iV9d/HT/kmuqf8A/9CFfIlABSN900tI33TQBDSUtJQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAV9S/sQf8fnif8A65xfzNfLVfUv7EH/AB+eJ/8ArnF/M0AfVq/ef6/0p1NX7z/X+lOoAKKKKACmt/rF+hp1Nb/WL9DQA6iignFABRXHzfEXQIrx4WlujAk4tnvRbObZJScbDLjGcnHpmt601uyu9Q1KzglLT6eUFwNpwu5dw578UAaVFcfdfETw/b2Oj3XnzyJq8Rlskigd3mAAOAoBOeelGo/ELRdPmEM6ag1wsC3M8MVo7vbRno0oA+T8aAOworlNU8faDp/9lbriW4bVYmlsVtoWlM4GPugDr8w4qS38caFP4futYN00NrayGGdZo2SSKTIGwoRndyMDvmgDp80Vw3gnxNHf3WrjUr2eO6X/AEoWl1btbi3tuisAwBYcZLepq7pHj7RNVv7a1t2u0+15FpNPbPHFckDJ8t2GG45oA6a6tobuExXMMc0RIJSRQwJByODUoGBXFTfE3w5FPOhmumjt7lrO4nS2cxW8ofZtd8YXn/GrmteO9H0jVpNMm+2XGoJCtwbe0tnmbyzn5vlHTg0AdVUN7/x5zf7jfyrn5/HGhxaDYasty89tqBC2iwxM8k7f3VQDJPBz6YqfSPEdh4g06/axMySWxaKeCeJopIm25AZWGRwQaAPzX1v/AJDN/wD9d5P/AEI1Sq7rf/IZv/8ArvJ/6EapUAFFFFABRRRQAUUUUAFLSUUALmjNJRQB6v8AAjwDc+O7zVYrS7itjaxox8wE5yT6fSvYf+GetV/6DFn/AN8NWD+xP/yFvE3/AFxh/m1fWNAHzZ/wz1qv/QYtP++Gqlqvhib4MW48Q6nOmoQyN9mEUAKsC3OefpX1BXhf7X//ACTW2/6/U/kaAPPf+GgdL/6A93/32tH/AA0Dpn/QHu/++1r5yooA9p+IfxhsPE3hS80uDTbiGSbbh3YEDBzXifme1Ok+4agoAl8welIz5HSo6KAFpKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvqX9iD/j8
8T/8AXOL+Zr5ar6l/Yg/4/PE//XOL+ZoA+rV+8/1/pTqav3n+v9KdQAUUUUAFNb/WL9DTqa3+sX6GgB1NkXchUEjIxkdqdRQB4FYeDp7PTZvD2u6Z4svUa4kx9hvV+yTI0hcNgsNvXkEdRXXSR6z4c8XeJHtdDvNRg1eKE20sDJtR0j2FZCSCvQHODxXp9FAHkPg3wvq9m/wzN7YOn9labNDd52/uZCigA89evTNaMy6t4W8Y+J7yLQbvV7bWVhkt5LbYcOiFTHJuIwvQg8jk16bRQB4JDYah4O1/4aWk1hJqF7aadfefDbkMy7thOzJAO3OOoyM1e1zwhrniTS9c1dLK5sprvVra/h08zCOZ4oUVD8wyFdsEj04zXsM+mWc+qW2oywI17bI8cMp6orY3AfXAq5QB45b+Ex4gh1pYrTxTaahPpU9jHd6xch0TzMfIAGJPIBz0wD61F4V8Ol73QbXV9J8XfatOkjk3T3qvaRSIMBx83K9cADPNe0UUAeT3XhnVW+FHi/TEsG/tG+vb6aCIbd0gedmQ9ccrg9a3/DujX1t8RNU1K4tmS1l0q0t45Tjl1L7l9eMiu5ooA8HbwTrcHh7wndSWmpB9Kubz7RaWM4iuBHK7bXQ5wSOOM9Ca7jwHpcFtFrWoR2WvW890qo8mrzCR5QqnBGCcAZxzXoFQ3v8Ax5zf7jfyoA/L7W/+Qzff9d5P/QjVKrut/wDIZv8A/rvJ/wChGqVABRRRQAUUUUAFFLRigBKKXFFACUUUtAH0z+xP/wAhbxN/1xh/m1fWNfEH7NPxA0XwFqGtS681wFuo41j8qPfyCc5/Ovev+GjPA3/PTUP/AAH/APr0Aey14X+1/wD8k1tv+v1P5Gr/APw0Z4G/v6h/4D//AF68w/aC+LPhvxx4Mh03Q2ujcrcrKfNi2jaAe+aAPnCinbTRtNADJPuGoKsSKdpqLyzQAyin+Was6bYTahfwWlvt82Zwi5OBk0AU6K9C/wCFSeJv7lp/3+/+tR/wqTxN/ctP+/3/ANagDz2ivQX+E3iVEZilrhRk/vv/AK1cwfDd+CQRHkcfeoAxaK2v+EcvvSP/AL6o/wCEcvvSP/vqgDFop8sbRyMjdVODTcUAJRS4ooASiiigAooooAKKKKACiiigAooooAK+pf2IP+PzxP8A9c4v5mvlqvqX9iD/AI/PE/8A1zi/maAPq1fvP9f6U6mr95/r/SnUAFFFFABTW/1i/Q06mt/rF+hoAdRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGbr2tWeh2YudQd1QsEURxtIzN6BVBJqrpXiDTfEWk3VxpVx5yxho5VKlHjbGdrKQCDgg81hfE6bV44tIGnHUI9Oe6xqMunoGnSLacbQQeN2MkDOKx/hrZX0Gr+Mrq4tNVgsrowtbPqTbpZgItpY/iOnXGKAPgnW/wDkM33/AF3k/wDQjVKrut/8hm+/67yf+hGqVABRRRQAUUUUAOXpS0i9KWgApGpaRqAG0tJS0AWLXq1WarWvU/SrNABTX7U6mvQA2iiigBr/AHaZT3+7UdAC1teCf+Ru0j/r5T+dYlbfgn/kbtI/6+U/nQB9VUtIKWgCK6/49Zf9xv5V4RL/AKx/qa93uv8Aj1l/3G/lXhEv+tf/AHjQAyiiigDzq/8A+Pyf/fNQVPf/APH7P/vmoKACkpaSgAooooAQ0lKaSgApRSUooAWiiigAooooAK+o/wBiH/j88T/9c4v5mvlyvqP9iH/j88T/APXOL+ZoA+rV+8/1/pTqav3n+v8ASnUAFFFFABTW/wBYv0NOprf6xfoaAHUUUUAFFFFABRRRQAUUUUAFFFFABRWfr2rWeiaXNfajPFBBEM7pHCAnsMnjJrj2+IVnpngD/hJdVu7C4QzLHss5gVUu4ATcTyyg5P0NAHf4qG8GLOf/AHG/lWJfeMNFt/DkutRalZzWShgjrOu2Rx/AGzjOeKXwzqh1jwlHeyXVndSyxMzvZvu
jU8/KDnnHSgD82tb/AOQzf/8AXeT/ANCNUsVe1r/kMX//AF8Sf+hGqdADcUYp1FADcUYp1FACDijNI3WkoAdmkJpKKAClpKKAJ7dwpO44qfzk/vCqNFAF7zk/vCmtKh/iFU6KALfmr60eYvrVSigC00iletR7xUNFAE28VteDJUTxXpTOcKLhCT+Nc/Wn4a/5GDT/APrsv86APqz+1rL/AJ7rR/a1l/z3WuKooA7KfVLN4JFWZSzKQB+FeUSaDqZkYi1fBOe1dPH99frXSr0FAHmP9gan/wA+j/pR/YGp/wDPo/6V6fRQB8talE6ahcK64ZZCCKrbG9K1te/5Dd9/12b+dUKAIdjelMxg81Zqu/3jQAlFFFACGkpTSUAFKKSnLQAYopaKAEooNFABX1H+xD/x+eJ/+ucX8zXy5X1H+xD/AMfnif8A65xfzNAH1av3n+v9KdTV+8/1/pTqACiiigAprf6xfoadTW/1i/Q0AOooooAKKKKACiiigAooooAKKKKAPLfjFbXY1jwrqJkkj0qzmlNzItr9pELsmEkaPuByM9s1zeraVbX/AIK8Y3mk302syXEtlNIkWn+SimKVWZkXHzMUBzj+6K92IB6jNIFAGAAPwoA8W8b3UN/rnhXXtOmeDw/bi4ikmGnGRbeYgYZ4yMjgEbscfjXUfDm1so7TxFeadqT36Xs3myMLT7NGHEQU7FwBzjkjvXoO0YxgY+lQ3gAs5sAD5G6fSgD8wda/5DF//wBfEn/oRqnVzWv+Qxf/APXxJ/6Eap0AFFFOCsegNADaKdsb+6aNjf3TQBG3WkpzAg802gAooooAKKKKACiiigAooooAKKKKACiiigArT8Nf8jBp/wD12X+dZlaPh11j1yxd2Cqsykk9BzQB7zRVL+1bD/n8g/77FH9q2H/P5B/32KAL6ffX610q9BXHQ6nYtKirdwFiwAAcc13q6XfFQRaTkEf3DQBUoq5/ZV//AM+c/wD3waX+yr//AJ85/wDvg0AfLWvf8hu+/wCuzfzqhV/xF8mvX6vwwnYEHtzWfuHrQAtV3+8an3D1qBj81ACUUlFAAaSlpKACnLTaVaAHUUZozQAhooJpKAFr6j/Yh/4/PE//AFzi/ma+W6+pP2If+PzxP/1zi/maAPq1fvP9f6U6mr95/r/SnUAFFFFABTW/1i/Q06mt/rF+hoAdRRRQAUUUUAFFFFABRRRQBHNPHAu6aRI0/vOwAqpcagY7+xt47aWaO537p0wUi2rkbvr0FcB8VdIhv9e0i8mudDuDaQzf8SrWJNkUwYr+8U54YYxkgjDGsrw/qdrql/8ADm50vThptuf7QRLVW3IpWIj5T3XIyD6UAevfaYfMMfmx7xyV3DIpJLmKL/WyInOPmYCvmbUF8PN8L47rUZIh46e/T7ad5+1Gf7QA6tznZjt0xivQ/wDhGdJ8R/Ejxr/bVqLyOGxsVjjkYlELJLlgOm7jg9RzQB6jqF9FY2M91PnyoULttGTgDsKp6Ze3Go+HFu7u2FrLNEz+Tv3lAc4BPrjGfevCPtN5qOg/DiDVJLObSZLedX/tORxbyzIcRrIR1O0HAPBNelfDXTf7L0/xBFBe6ZLZSTeZFa6dK0kVofLG5RknGSC2OgzQB+f2tf8AIYv/APr4k/8AQjVOrms/8hi//wCviT/0I1ToAKv2/wDqlqhV+3/1K0ASUUUUAULr/XGoKnuv9cagoAKKKKACiiigAooooAKKKKACiiigBaWkpaACjNFJQA7J9aMn1NFFAFzRz/xNrLn/AJbp/wChCv0iswPskHH8C/yr83dH/wCQtZf9d0/9CFfpFZ/8ekH+4v8AKgCbA9BRgelFFAH5zeOv+Ry1v/r8l/8AQjWFW546/wCRy1v/AK/Jf/QjWFQAtJRRQAUUUUAFFFFABRRRQAUUUUAFFFFABX1L+xB/x+eJ/wDrnF/M18tV9S/sQf8AH54n/wCucX8zQB9Wr95/r/SnU1fvP9f6U6g
AooooAKa3+sX6GnU1v9Yv0NADqKKKACiiigAooooAKKKKAM/VNG03VvL/ALTsLS78v7nnxK+36ZFWEsrePyNkEK+QCItqAeWCMHb6cVYooA4PVvAEmsaqZtT1SOSwa4WdoI7KOOSTa25UeUclQQOwziu2W3iSWSRYoxJIAHcLywHQE98ZNTUhOOvFAFOfSrCex+xTWVtJZ9PIaMFPy6VHFptnpmmTwadaW9rDsY7IYwi5x6CtEnFQXhzZzEHI2N/KgD8wda/5DF//ANfEn/oRqnVvWv8AkMX/AP18Sf8AoRqnQAtX4P8AUryKz6TJ9TQBqZHqKMj1FZeT6mjJ9TQBNdf641BS0lABRRSigBKKdRQA2inUUANop1IaAEooooAWlpF61JigBlFPxSr94UAMoqzgUYFAD9I/5C1ln/nun/oQr9HrOeH7JD+9j+4v8Q9K/N9RggjgitUalff8/lx/38NAH6IfaIf+esf/AH0KXz4f+esf/fQr87v7Svv+fy4/7+Gj+0r7/n8uP+/hoAb45ic+MNaIRiDdyEHB/vGsPyZP+eb/APfJr7G8N2ltJoOnvJBEztAhJKAk8CtH7Baf8+0P/fAoA+KPJk/55v8A98mmsjKcEEH6V9tfYLT/AJ9of++BXzp8RIo08ZamqIqqJOAB04oA8vwfQ0YPoa6zYv8AdH5UbF/uj8qAOTxRWnqgAuiAAOKp4oAgoqfFMk60ARUU/FGKAG0U4UtADK+pP2IP+PzxP/1zi/ma+Xq+ov2Iv+P3xP8A9c4f5mgD6sX7z/X+lOpq/ef6/wBKdQAUUUUAFNb/AFi/Q06mt/rF+hoAdRRRQAUUUUAFFFFABRRRQAUUUUAFeL+MRqOsfFbUNMmtbO8s7bT4ZLW2vNSktEJYtvkUIp3sCAM9vxr2isvW/D+ka6qLrGnW14IzlPOjDFfoetAHkZ0rUpG+H+ja/qLTBry6SU2d67iWJUYqjSDaWxwD9K6zwTbJpWu+N9Hs2kGnWssMlvC8rSCLzLdWYKWJIG7Jx712ltoum2qWiW9jbxLaZ+zhIwPKz12+mcmnz2dvCl7cRRIk06ZlcLgvhcDJ74FAH5ja1/yGL/8A6+JP/QjVOrmtf8hi/wD+viT/ANCNU6ACkpaSgBKKKKACiiigApRSUooAWiiigAooooAKQ0tIaAEooooAcv3qkqNfvVJQAUL94UUL94UATUUUUAKKtVVFWqACijijigD668L/APIu6b/1wT+QrTrM8Mf8i7pv/XBP5CtOgAr5v+I//I6ap/11/pX0hXzf8Rv+R01T/rr/AEoA5qijFGKAMTVf+Ps/QVTq7qv/AB9n6CqVABTJOtPpknWgBtFFFAAKdSCigAr6i/Yi/wCP3xP/ANc4v5mvl2vqL9iL/j98T/8AXOH+ZoA+rF+8/wBf6U6mr95/r/SnUAFFFFABTW/1i/Q06mt/rF+hoAdRRRQAUUUUAFFFFABRRRQAUZFFeSeP7qfUtc1qLR/7a+0aRbq000Oq/ZLeBihdTs5DnHJyCO1AHreagvryCxs57q6cJBCjSOx7KBk15DpWsaj4yfwNY6hqd1Y2+o6O+oXLWcpge5lXYu0OvIHzFiBjr6VTuNb1WzsBp/8Aat1c29h4vtdOF275aW3baTHIw+9gttJPXHNAHq+h+JLDV9Klv4jJbxQlhMlynlvFjk7genHP0q+1xDd6Ubi1kWWCWHfHIhyGUjIIPoa828aXD3viXxNpK3kq2q+HDM0UMhUrJvOG47kAD6Vt/DLS4tP+HWmyRXV5P9o0+KQi4uGlC/uxwoP3R7CgD88tb/5DN/8A9d5P/QjVKrmt/wDIZvv+u8n/AKEapUALSUUUAFFFFABRRRQAUtJRQAtFJRQAopaQUtABSGlpDQAlFFFAC0ZNJRQAuTRk+tJRQA7e3qaN7epptFAD97f3jS+a/wDeP51HTqAHea/94/nR5r/3j+dNooA24vF2vwxrHFrF6qKNqqJTgCn/APC
ZeI/+g1ff9/TXP0UAdB/wmXiL/oNX3/f019m/Brwxomu/DXQ9S1nS7S9v7iHdLcTRhnc5PJNfCdfoL+z/AP8AJIfDn/Xv/wCzGgDc/wCEB8Kf9C/pv/fkUf8ACA+FP+hf03/vyK6aigD4Y/aZ0ux0j4ly22mWsVrbi3Q+XEu0ZNeTV7L+1d/yVab/AK9o68aoAKjl+9UlRy/eoAZmjNJRQAuaM0lFAC5r6k/Yg/4/PE//AFzi/ma+Wq+pf2IP+PzxP/1zi/maAPq1fvP9f6U6mr95/r/SnUAFFFFABTW/1i/Q06mt/rF+hoAdRRRQAUUUUAFFFFABRRRQAVzeseCfD+r6lLf6hpyy3MqhJSJHUSqOgdQQGx75rpCcVnWGuaXqF1Na2Go2lzcwDMsUMyuyfUA5FAGZdeCtAutLsNOm05Pstgu21COyNCOmFZSGH51J/wAIfoI8PPoY0yEaY7b2h55bOd27Od2ec5zVq/8AEejadfx2V/qllbXkuNkMsyq7Z6cE5qXWNc0zRYEm1e/trKJ22q88gQMfQZoAoaL4P0LRriafT9PWOeaLyJZHdpGkTOdrFiSfxp+l+HdM8PafdxaPbfZoZEJKB2KjAOMAk4HsKuXGt6XbaWupXGoWkensAVuGlURsD0w2cGorPWdN1rS7ifSL62vYVVlLwSBwDjocdDQB+Z2t/wDIZvv+u8n/AKEapVd1v/kM3/8A13k/9CNUqACiiigAooooAKKKKACiiigAooooAUUtNpaAFpDRSUAFFFFABRRRQAUUUUAFFFFABTqbS0ALRSUUAJRRRQAtfoL+z/8A8kh8Of8AXv8A1Nfn1XoXh/4w+M9A0e20zS9U8qzt12xp5YOBQB+gtFfBf/C+fiB/0Gf/ACEv+FH/AAvn4gf9Bn/yEv8AhQBs/tXf8lWm/wCvaOvGq+wfhD4Q0f4s+EU8SeOLb7fq7StCZtxT5V6DArt/+FB/D7/oDf8AkVv8aAPgeo5etffn/Cg/h9/0Bv8AyK3+NfNv7UXgrQ/BXiLSbbw9afZop7cu43FsndjvQB4jRRRQAUUUUAFfUv7EH/H54n/65xfzNfLVfUv7EH/H54n/AOucX8zQB9Wr95/r/SnU1fvP9f6U6gAooooAKa3+sX6GnU1v9Yv0NADqKKKACiiigAooooAKKKKAEYBgQeQa8w8PaZY6V8ctag02zt7SFtEgdkgjCAsZnySB1PvXp5Ga4i1+G+mW3iQ66l/rLagcBma9YqyhtwQjuuSeKAOC8T6dcaNH8RBqvhqfVG1SR7m0v1RGjWPylCq7k5TYVJ/HirOlW2p/aPBXiTVtHutasv7BFq8MMayyQTNtbzNjEZ3KME9q7/XfAthrl/LPqN5qcltMVMtiLphbvgAcqO3HTODV3xD4Yh1mO2QX2oWCW6lFFjOYsqcZBwPYY9KAPP8A4USWk3hrxK3iHT7a00ex1m4lggulVkt1B3Y7qNpJ6dDmtPwFYm81nxH4ptbH+zdK1K3jhtINnlmdYw3+kMo6Ft2BnnABre1L4e6Ff+EofDbRzw6ZHIJdsMxVnYHOWbq2Tyc9asaL4Xg8Pw3bwX+p3QkhKbbu5MqrgHoD0oA/N/W/+Qzff9d5P/QjVKrut/8AIZv/APrvJ/6EapUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFLQAlFLRQAlFLRQAlFLRQAlFLRQAlOpKWgAooooA+5/wBkf/kksX/XzJXtdeKfsj/8klh/6+ZK9roAK+PP22P+Rt0L/r0b/wBCNfYdfHv7bH/I26F/16N/6EaAPm2ilxRigBKKWkoAK+pf2IP+PzxP/wBc4v5mvlqvqX9iD/j88T/9c4v5mgD6tX7z/X+lOpq/ef6/0p1ABRRRQAU1v9Yv0NOprf6xfoaAHUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAVDe/wDHnN/uN/Kpqhvjizm/3G/lQB+Y+rQq+rXzEn/
j4k/9CNVfs6+pq7qbD+1L0f8ATxJ/6EagoAh+zr6moHjAYgVdqrJ980ARbKNlPooAaIx70vlj3pw6UtADPLHvTWXBqWo5PvUAMxRilooAMUYoFLQAmKUKDRSr1oANg96Nn1p9FACJGCwBzUvkJ6mmx/fFT0AReQnqadHbI0ijJ5OKfUkH+uT60AWP7Lh7s/50f2XB/ef86v0UAZ50uHH3n/Oov7Pi/vNWo3Q1BQBS/s+L+81H9nxf3mq7RQA1dGgKgln/ADpf7Gt/77/nWnH/AKtfpTqAMv8Asa3/AL7/AJ1j3kCw3DxrkhTjmusrmdT/AOP6X60AU9lGynUUAeqfDr41+IPAvh1dI0q3spLdXaQNMhLZPXvXUf8ADUHjD/nz0z/v2f8AGvBBRQB73/w1B4w/589M/wC/Z/xrm/E/ie7+LVxFf+IFihmtF8lBbDAIPPNeUV3Xw/8A+PG5/wCun9KAH/8ACGaf/wA9Jvzo/wCEMsP+ek35109LQB534n0G20uOFoGkbeSDuNc/5K+tdv4+/wBRa/7x/lXG0AReSvrX09+xIoW+8Tgf884f5mvmUnFfTn7E5/07xR/1zh/maAPqlfvP9f6U6mr95/r/AEp1ABRRRQAU1v8AWL9DTqa3+sX6GgB1FFFABRRRQAUUUUAFFFFABRRRQAUUZFISAMkgD3oAWsi506RNSvdQN9cNFJaiEWhP7pCNx3gf3jnB+la9Q3v/AB5z/wC438qAPzL1L/kKXv8A18Sf+hGq9WNS/wCQpe/9d5P/AEI1XoAKqyffNWqpytiQ0AFFM3+1G/2oAkHSlqPzPajzPagCSo5PvUeZ7U1mzQAUUlFACilpopaAFpV602lBxQBJRUe+jfQBNH98VPVRZMN0qTz/APZoAnqSD/XJ9aqef/s05Lna4bb0OaAOgorK/tU/88/1o/tU/wDPP9aANRuhqCqX9qnB/dj86i/tI/8APMfnQBpUVm/2kf8AnmPzo/tI/wDPMfnQB0kf+rX6U6sVNYIUDyu3rS/2yf8Anl+tAGzXM6l/x/S/Wrv9sn/nl+tVJV+0yNKeNxzigCnRVn7P/tUfZ/8AaoAriinum1sUmKAG13Xw/wD+PG5/66f0rh8V0fhjVTp1vKgjD7mz1oA7+lrmf+Emb/n3H50f8JM3/PuPzoAg8ff6i1/3j/KuNr1Pw34fHxBkmhknNn9lAbIXduzxW7/wo6P/AKDDf9+v/r0AeH19N/sT/wDH/wCKP+ucP8zXKf8ACjo/+gw3/fr/AOvXp/7M/hoeFfF3ijTluDOBb28m8jHUmgD6FX7z/X+lOpq/ef6/0p1ABRRRQAU1v9Yv0NOprf6xfoaAHUUUUAFFFFABRRRQAE4rz3/hZCazeXFh4K0yXWLqBzFLLJILeCJh1yW+Y49lr0Kub8QeCtB1tjLdWKxXnVbq2YwzKfUOuD+eaAMXxZ4xvvCXh/TH1ldMGsahdC2jUSMlvGSGbczHJwFX8TjpmsWH4ozLpnifemm395o9iL6OWxkYwTqcjac8qwI6ZPFdJrPgf+0NE0y0GqXTX2mXP2q0vrpVmdWwy7XHAddrEH885qK48I6tqnhvWdL1nU7AnULc26NZ2HlCLPVjliW+nFAENl4r1y08Q6LZ+IbGxhtdYikeD7PIzPAyIHKPnhuM8jHSuN8ceJvEGv8Aw3uNXisLOHQrq7gjiCyP9oWMXSASE/dwSMbeoB616Zq/hkajrXhzUGudn9kGU+X5eRLvj2evGOveuRvPhpqc2gt4dh8RCLw8lwtxDD9k3TKBKJBGz7uVyD2B6fiAdT4iv/EumXXnaZpdnqmmBAWiE/k3CnvjdlG/SofDPjnSfFcepWth9oiv7JMXNtOmGiJBxyMqfwNT6/4Ns/EGopPq13qE1qqhRYpcGOAkd2C4LficVq2+k6fpOlzQaXZW9pEIz8kMYQdPagD82NTI/tS95/5eJP8A0I1BVjUv+Qpe/wDXxJ/6EarZ5HvQAtU
Zv9YavVRn/wBYaAI6KKKACiiigAooooAKKKKAFFLSCloAKQ0tIaAEooooAUdadTRTqACiiigAooooAKZT6ZQAUtJS0ATr90UUL0FFABVyH/ViqlW4f9WKAH0UmeQPWloArzffqOpJvv1GTigAq9p/3X+tUavaf91vrQBbopGIXr9KWgD1v4A/8f2q/wDXNf517NXjPwB/4/tV/wCua/zr2C8uYrO2ae4bbGuMkDPU4/mRQAXV1BaR+ZdTRxJnG52AGatfBm5gu/iN4pktpUlj+yWw3I2R/FUMkaSriRFcdcMM1a+D8aR/EjxSsaKi/Y7XhRj+9QB7Kv3n+v8ASnU1fvP9f6U6gAooooAKa331+hp1Iy57kfSgBaKbsP8Afb9KNn+236UAOopuz/bb9KNn+236UAOopuz/AG2/SjZ/tt+lADqKbs/22/SjZ/tt+lADqKbs/wBtv0o2f7bfpQA6im7P9tv0o2f7bfpQA6o54/NidM43KVz9ads/22/SjZ/tt+lAHyLqH7MHiea/upYda0jypJXdd3mA4JyM8dar/wDDLfirj/ic6Nx/10/+Jr7C2n++36f4UbT/AH2/SgD49/4Zc8Vf9BnRv/In/wATUTfsq+KWYk61o/8A5E/+Jr7H2n++36UbT/fb9KAPjb/hlPxR/wBBvR//ACJ/8TR/wyn4o/6Dej/+RP8A4mvsnaf77fpRtP8Afb9KAPjb/hlPxR/0G9H/APIn/wATR/wyn4o/6Dej/wDkT/4mvsnaf77fpRtP99v0oA+Nv+GU/FH/AEG9H/8AIn/xNH/DKfij/oN6P/5E/wDia+ydp/vt+lG0/wB9v0oA+Nv+GU/FH/Qb0f8A8if/ABNH/DKfij/oN6P/AORP/ia+ydp/vt+lG0/32/SgD42/4ZT8Uf8AQa0f/wAif/E0v/DKnij/AKDWj/8AkT/4mvsjaf77fpRtP99v0oA+N/8AhlTxR/0GtH/8if8AxNH/AAyp4o/6DWj/APkT/wCJr7I2n++36UbT/fb9KAPjb/hlPxR/0G9H/wDIn/xNH/DKfij/AKDej/8AkT/4mvsnaf77fpRtP99v0oA+N/8AhlTxR/0GtH/8if8AxNH/AAyp4p/6DWj/APkT/wCJr7I2n++36UbT/fb9KAPjf/hlTxT/ANBrR/8AyJ/8TR/wyp4p/wCg1o//AJE/+Jr7I2n++36UbT/fb9KAPjf/AIZU8U/9BrR//In/AMTR/wAMqeKf+g1o/wD5E/8Aia+yNp/vt+lG0/32/SgD43/4ZU8U/wDQa0f/AMif/E0n/DKfij/oN6P/AORP/ia+ydp/vt+lG0/32/SgD42/4ZT8Uf8AQb0f/wAif/E0f8Mp+KP+g3o//kT/AOJr7J2n++36UbT/AH2/SgD45/4ZX8Uj/mNaP/5E/wDiaP8AhlfxTn/kNaP/AORP/ia+xtp/vt+lG0/32/SgD45/4ZX8U/8AQa0f/wAif/E1Kv7LvitVwNZ0b/yJ/wDE19g7T/fb9KNp/vt+lAHx+f2XfFRIP9s6Nx/10/8AiaP+GXvFf/QZ0b/yJ/8AE19gbT/fb9KNp/vt+lAHx4/7LXipjk6zo3/kT/4mm/8ADLHin/oNaP8A+RP/AImvsXaf77fpRtP99v0oA+Ov+GWPFX/Qa0f/AMif/E1PD+zB4riBC6zoxz6+Z/8AE19fbT/fb9KNp/vt+lAHyI37MfixgM6xovBz/wAtP/iaX/hmXxZ/0GNF/wDIn/xNfXW0/wB9v0o2n++36UAfNXgv4J+NvCk1xJaX/h6czgKRKZuMfQV0WpfD7x/f2clvJP4YVXwcqZ88EH09q9z2n++36UbT/fb9KAPFR4J+IWP9b4X/ADn/AMK6X4Y+DNa0DXNY1bxBcae9xexxRJHZB9qqmeSW5zzXou0/32/Sjaf77fpQAJ95/r/SnUirjPJOeeaWgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAo
oooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigD//2Q== ">
<center>Figure 3: The model paid attention correctly when outputting "European Economic Area". In French, the order of these words is reversed ("zone économique européenne") as compared to English.</center>
This [link](https://huggingface.co/models?search=helsinki) lists out the models that allows translation from a source language to one or more target languages. To get a list of supported transformer backbones for this model use the below command.
```
print(TextTranslator.supported_backbones)
```
Sample code to instantiate the model object and translate a Spanish language text into German.
```
translator_german = TextTranslator(target_language="de")
text = """La cobertura terrestre describe la superficie de la tierra. Son útiles para comprender la planificación
urbana, la gestión de recursos, la detección de cambios, la agricultura y una variedad de otras aplicaciones."""
print(translator_german.translate(text))
```
Sample code for translating English language text to French language.
```
translator_french = TextTranslator(source_language="en", target_language="fr")
text_list = """Land cover describes the surface of the earth. They are useful for understanding urban planning,
resource management, change detection, agriculture and a variety of other applications."""
print(translator_french.translate(text_list))
```
## TextGenerator
**TextGenerator** model can be used to generate a sequence of text for a given incomplete text sequence or paragraph. These models are trained with an autoregressive language modeling objective and are therefore powerful at predicting the next token in a sequence. Visit [this](https://huggingface.co/models?search=&filter=lm-head) link to learn more about the available models for the **text-generation** task. To get a list of supported transformer backbones for this model use the below command.
```
print(TextGenerator.supported_backbones)
```
Sample code to instantiate the model object and using it to generate sequence of text for a given incomplete sentence
```
text_gen = TextGenerator()
text_list = ["Hundreds of thousands of organizations in virtually every field are using GIS to make maps that"]
pprint(text_gen.generate_text(text_list, num_return_sequences=2, max_length=30))
```
## FillMask
**FillMask** model can be used to provide suggestion for a missing token/word in a sentence. These models have been trained with a [Masked Language Modeling (MLM)](https://huggingface.co/transformers/task_summary.html#masked-language-modeling) objective, which includes the bi-directional models in the library. Visit this [link](https://huggingface.co/models?filter=lm-head) to learn more about the available models for **fill-mask** task. To get a list of supported transformer backbones for this model use the below command.
```
print(FillMask.supported_backbones)
```
Sample usage to get suggestions for a missing word in a sentence
```
fill_mask = FillMask()
# original text - This deep learning model is used to extract building footprints from high resolution satellite imagery
text_list = ["This deep learning model is used to extract building footprints from high resolution satellite __."]
fill_mask.predict_token(text_list, num_suggestions=4)
```
# References
[1] [BERT Paper](https://arxiv.org/pdf/1810.04805.pdf)
[2] [Summary of the models](https://huggingface.co/transformers/summary.html)
[3] [The Annotated Transformer](http://nlp.seas.harvard.edu/2018/04/03/attention.html)
[4] [Text Summarization with Machine Learning](https://medium.com/luisfredgs/automatic-text-summarization-with-machine-learning-an-overview-68ded5717a25)
| github_jupyter |
##### Copyright © 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TFX Component Tutorial
***A Component-by-Component Introduction to TensorFlow Extended (TFX)***
Note: We recommend running this tutorial in a Colab notebook, with no setup required! Just click "Run in Google Colab".
<div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/tfx/tutorials/tfx/components">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a></td>
<td><a target="_blank" href="https://colab.sandbox.google.com/github/tensorflow/tfx/blob/master/docs/tutorials/tfx/components.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/tfx/tree/master/docs/tutorials/tfx/components.ipynb">
<img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td>
</table></div>
This Colab-based tutorial will interactively walk through each built-in component of TensorFlow Extended (TFX).
It covers every step in an end-to-end machine learning pipeline, from data ingestion to pushing a model to serving.
When you're done, the contents of this notebook can be automatically exported as TFX pipeline source code, which you can orchestrate with Apache Airflow and Apache Beam.
Note: This notebook and its associated APIs are **experimental** and are
in active development. Major changes in functionality, behavior, and
presentation are expected.
## Background
This notebook demonstrates how to use TFX in a Jupyter/Colab environment. Here, we walk through the Chicago Taxi example in an interactive notebook.
Working in an interactive notebook is a useful way to become familiar with the structure of a TFX pipeline. It's also useful when doing development of your own pipelines as a lightweight development environment, but you should be aware that there are differences in the way interactive notebooks are orchestrated, and how they access metadata artifacts.
### Orchestration
In a production deployment of TFX, you will use an orchestrator such as Apache Airflow, Kubeflow Pipelines, or Apache Beam to orchestrate a pre-defined pipeline graph of TFX components. In an interactive notebook, the notebook itself is the orchestrator, running each TFX component as you execute the notebook cells.
### Metadata
In a production deployment of TFX, you will access metadata through the ML Metadata (MLMD) API. MLMD stores metadata properties in a database such as MySQL or SQLite, and stores the metadata payloads in a persistent store such as on your filesystem. In an interactive notebook, both properties and payloads are stored in an ephemeral SQLite database in the `/tmp` directory on the Jupyter notebook or Colab server.
## Setup
First, we install and import the necessary packages, set up paths, and download data.
### Install TFX
> #### Note
> Because of package updates, you must use the button at the bottom of the output of this cell to restart the runtime. Following restart, please rerun this cell.
```
!pip install tfx==0.15.0 tensorflow==2.0 tensorboard==2.0.0 grpcio==1.24.3
```
### Import packages
We import necessary packages, including standard TFX component classes.
```
import os
import pprint
import tempfile
import urllib
import tensorflow as tf
tf.get_logger().propagate = False
pp = pprint.PrettyPrinter()
import tfx
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.components.transform.component import Transform
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.proto.evaluator_pb2 import SingleSlicingSpec
from tfx.utils.dsl_utils import csv_input
from tensorflow.core.example import example_pb2
%load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip
```
Let's check the library versions.
```
print('TensorFlow version: {}'.format(tf.__version__))
print('TFX version: {}'.format(tfx.__version__))
```
### Set up pipeline paths
```
# This is the root directory for your TFX pip package installation.
_tfx_root = tfx.__path__[0]
# This is the directory containing the TFX Chicago Taxi Pipeline example.
_taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline')
# This is the path where your model will be pushed for serving.
_serving_model_dir = os.path.join(
tempfile.mkdtemp(), 'serving_model/taxi_simple')
```
### Download example data
We download the example dataset for use in our TFX pipeline.
The dataset we're using is the [Taxi Trips dataset](https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) released by the City of Chicago. The columns in this dataset are:
<table>
<tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr>
<tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr>
<tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr>
<tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr>
<tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr>
<tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr>
</table>
With this dataset, we will build a model that predicts the `tips` of a trip.
```
# Create a temporary directory to hold the downloaded dataset for this session.
_data_root = tempfile.mkdtemp(prefix='tfx-data')
# Simplified Chicago Taxi Trips CSV hosted in the TFX repository.
DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv'
_data_filepath = os.path.join(_data_root, "data.csv")
# Download the CSV into the temporary data root; ExampleGen reads it from there.
urllib.request.urlretrieve(DATA_PATH, _data_filepath)
```
Take a quick look at the CSV file.
```
!head {_data_filepath}
```
*Disclaimer: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.*
### Create the InteractiveContext
Last, we create an InteractiveContext, which will allow us to run TFX components interactively in this notebook.
```
# Here, we create an InteractiveContext using default parameters. This will
# use a temporary directory with an ephemeral ML Metadata database instance.
# To use your own pipeline root or database, the optional properties
# `pipeline_root` and `metadata_connection_config` may be passed to
# InteractiveContext. Calls to InteractiveContext are no-ops outside of the
# notebook.
context = InteractiveContext()
```
## Run TFX components interactively
In the cells that follow, we create TFX components one-by-one, run each of them, and visualize their output artifacts.
### ExampleGen
The `ExampleGen` component is usually at the start of a TFX pipeline. It will:
1. Split data into training and evaluation sets (by default, 2/3 training + 1/3 eval)
2. Convert data into the `tf.Example` format
3. Copy data into the `_tfx_root` directory for other components to access
`ExampleGen` takes as input the path to your data source. In our case, this is the `_data_root` path that contains the downloaded CSV.
Note: In this notebook, we can instantiate components one-by-one and run them with `InteractiveContext.run()`. By contrast, in a production setting, we would specify all the components upfront in a `Pipeline` to pass to the orchestrator (see the "Export to Pipeline" section).
```
example_gen = CsvExampleGen(input=csv_input(_data_root))
context.run(example_gen)
```
Let's examine the output artifacts of `ExampleGen`. This component produces two artifacts, training examples and evaluation examples:
Note: The `%%skip_for_export` cell magic will omit the contents of this cell in the exported pipeline file (see the "Export to pipeline" section). This is useful for notebook-specific code that you don't want to run in an orchestrated pipeline.
```
%%skip_for_export
for artifact in example_gen.outputs['examples'].get():
print(artifact.split, artifact.uri)
```
We can also take a look at the first three training examples:
```
%%skip_for_export
import tensorflow_data_validation as tfdv
# Get the URI of the output artifact representing the training examples, which is a directory
train_uri = example_gen.outputs['examples'].get()[0].uri
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
decoder = tfdv.TFExampleDecoder()
# Iterate over the first 3 records and decode them using a TFExampleDecoder
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = decoder.decode(serialized_example)
pp.pprint(example)
```
Now that `ExampleGen` has finished ingesting the data, the next step is data analysis.
### StatisticsGen
The `StatisticsGen` component computes statistics over your dataset for data analysis, as well as for use in downstream components. It uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.
`StatisticsGen` takes as input the dataset we just ingested using `ExampleGen`.
```
statistics_gen = StatisticsGen(
examples=example_gen.outputs['examples'])
context.run(statistics_gen)
```
After `StatisticsGen` finishes running, we can visualize the outputted statistics. Try playing with the different plots!
```
%%skip_for_export
context.show(statistics_gen.outputs['statistics'])
```
### SchemaGen
The `SchemaGen` component generates a schema based on your data statistics. (A schema defines the expected bounds, types, and properties of the features in your dataset.) It also uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.
`SchemaGen` will take as input the statistics that we generated with `StatisticsGen`, looking at the training split by default.
```
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
context.run(schema_gen)
```
After `SchemaGen` finishes running, we can visualize the generated schema as a table.
```
%%skip_for_export
context.show(schema_gen.outputs['schema'])
```
Each feature in your dataset shows up as a row in the schema table, alongside its properties. The schema also captures all the values that a categorical feature takes on, denoted as its domain.
To learn more about schemas, see [the SchemaGen documentation](https://www.tensorflow.org/tfx/guide/schemagen).
### ExampleValidator
The `ExampleValidator` component detects anomalies in your data, based on the expectations defined by the schema. It also uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.
`ExampleValidator` will take as input the statistics from `StatisticsGen`, and the schema from `SchemaGen`.
By default, it compares the statistics from the evaluation split to the schema from the training split.
```
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
context.run(example_validator)
```
After `ExampleValidator` finishes running, we can visualize the anomalies as a table.
```
%%skip_for_export
context.show(example_validator.outputs['anomalies'])
```
In the anomalies table, we can see that the `company` feature takes on new values that were not in the training split. This information can be used to debug model performance, understand how your data evolves over time, and identify data errors.
In our case, this anomaly is innocuous, so we move on to the next step of transforming the data.
### Transform
The `Transform` component performs feature engineering for both training and serving. It uses the [TensorFlow Transform](https://www.tensorflow.org/tfx/transform/get_started) library.
`Transform` will take as input the data from `ExampleGen`, the schema from `SchemaGen`, as well as a module that contains user-defined Transform code.
Let's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, [see the tutorial](https://www.tensorflow.org/tfx/tutorials/transform/simple)). First, we define a few constants for feature engineering:
Note: The `%%writefile` cell magic will save the contents of the cell as a `.py` file on disk. This allows the `Transform` component to load your code as a module.
```
_taxi_constants_module_file = 'taxi_constants.py'
%%skip_for_export
%%writefile {_taxi_constants_module_file}
# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
# Keys
LABEL_KEY = 'tips'
FARE_KEY = 'fare'
def transformed_name(key):
  """Return the given feature key with the transformed-feature suffix appended."""
  suffix = '_xf'
  return key + suffix
```
Next, we write a `preprocessing_fn` that takes in raw data as input, and returns transformed features that our model can train on:
```
_taxi_transform_module_file = 'taxi_transform.py'
%%skip_for_export
%%writefile {_taxi_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
import taxi_constants
_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS
_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS
_VOCAB_SIZE = taxi_constants.VOCAB_SIZE
_OOV_SIZE = taxi_constants.OOV_SIZE
_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT
_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS
_FARE_KEY = taxi_constants.FARE_KEY
_LABEL_KEY = taxi_constants.LABEL_KEY
_transformed_name = taxi_constants.transformed_name
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))

  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)

  for key in _BUCKET_FEATURE_KEYS:
    # Quantize these continuous features into _FEATURE_BUCKET_COUNT buckets.
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT,
        always_return_num_quantiles=False)

  for key in _CATEGORICAL_FEATURE_KEYS:
    # Pass categorical features through unchanged, aside from filling in
    # missing values and densifying.
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])

  # Was this passenger a big tipper?  Derive the binary label: 1 when the
  # tip exceeds 20% of the fare, 0 otherwise (including when the fare is NaN).
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_transformed_name(_LABEL_KEY)] = tf.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))

  return outputs
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2.  Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Pick a type-appropriate fill value: empty string for string features,
  # zero for numeric features.
  default_value = '' if x.dtype == tf.string else 0
  # Densify to shape [batch, 1], then squeeze away the second dimension to
  # produce a rank-1 tensor.
  return tf.squeeze(
      tf.sparse.to_dense(
          tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
          default_value),
      axis=1)
```
Now, we pass in this feature engineering code to the `Transform` component and run it to transform your data.
```
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=os.path.abspath(_taxi_transform_module_file))
context.run(transform)
```
Let's examine the output artifacts of `Transform`. This component produces two types of outputs:
* `transform_output` is the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models).
* `transformed_examples` represents the preprocessed training and evaluation data.
```
transform.outputs
```
Take a peek at the `transform_output` artifact. It points to a directory containing three subdirectories.
```
train_uri = transform.outputs['transform_output'].get()[0].uri
os.listdir(train_uri)
```
The `transformed_metadata` subdirectory contains the schema of the preprocessed data. The `transform_fn` subdirectory contains the actual preprocessing graph. The `metadata` subdirectory contains the schema of the original data.
We can also take a look at the first three transformed examples:
```
# Get the URI of the output artifact representing the transformed examples, which is a directory
train_uri = transform.outputs['transformed_examples'].get()[1].uri
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a TFRecordDataset to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
decoder = tfdv.TFExampleDecoder()
# Iterate over the first 3 records and decode them using a TFExampleDecoder
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = decoder.decode(serialized_example)
pp.pprint(example)
```
After the `Transform` component has transformed your data into features, the next step is to train a model.
### Trainer
The `Trainer` component will train a model that you define in TensorFlow (either using the Estimator API or the Keras API with [`model_to_estimator`](https://www.tensorflow.org/api_docs/python/tf/keras/estimator/model_to_estimator)).
`Trainer` takes as input the schema from `SchemaGen`, the transformed data and graph from `Transform`, training parameters, as well as a module that contains user-defined model code.
Let's see an example of user-defined model code below (for an introduction to the TensorFlow Estimator APIs, [see the tutorial](https://www.tensorflow.org/tutorials/estimator/premade)):
```
_taxi_trainer_module_file = 'taxi_trainer.py'
%%skip_for_export
%%writefile {_taxi_trainer_module_file}
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
import taxi_constants
_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS
_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS
_VOCAB_SIZE = taxi_constants.VOCAB_SIZE
_OOV_SIZE = taxi_constants.OOV_SIZE
_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT
_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS
_MAX_CATEGORICAL_FEATURE_VALUES = taxi_constants.MAX_CATEGORICAL_FEATURE_VALUES
_LABEL_KEY = taxi_constants.LABEL_KEY
_transformed_name = taxi_constants.transformed_name
def _transformed_names(keys):
  """Map each raw feature key to its transformed-feature name."""
  return list(map(_transformed_name, keys))
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Return the raw (pre-transform) feature spec derived from the schema.

  Args:
    schema: A schema proto describing the raw input data.

  Returns:
    A feature spec dict usable with `tf.io.parse_example`.
  """
  return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files.

  Args:
    filenames: Filenames of the TFRecord files to read.

  Returns:
    A `tf.data.TFRecordDataset` that decompresses the files with GZIP.
  """
  return tf.data.TFRecordDataset(
      filenames,
      compression_type='GZIP')
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    A `tf.estimator.DNNLinearCombinedClassifier` wired with the transformed
    feature columns: categorical columns on the linear (wide) side and dense
    float columns on the DNN (deep) side.
  """
  # Dense numeric features feed the DNN side of the model.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  # Vocabulary features were integerized by tf.transform; identity columns
  # must cover the vocabulary ids plus the out-of-vocab hash buckets.
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  # Bucketized features carry integer bucket indices in
  # [0, _FEATURE_BUCKET_COUNT).
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  # Each remaining categorical feature has its own known maximum value,
  # paired positionally with _MAX_CATEGORICAL_FEATURE_VALUES.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_output, schema):
  """Build the serving inputs.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label exists only in training data; serving requests never include it.
  raw_feature_spec.pop(_LABEL_KEY)

  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()

  # Apply the same tf.transform graph used at training time, so serving-time
  # preprocessing matches training-time preprocessing exactly.
  transformed_features = tf_transform_output.transform_raw_features(
      serving_input_receiver.features)

  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_output, schema):
  """Builds everything needed for tf-model-analysis to run the model.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # TFMA feeds raw (untransformed) serialized examples, so start from the
  # raw feature spec rather than the transformed one.
  raw_spec = _get_raw_feature_spec(schema)

  serialized_examples = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')

  # Parse the raw tf.Examples, then process them through the tf-transform
  # graph computed during the preprocessing step.
  raw_features = tf.io.parse_example(serialized_examples, raw_spec)
  transformed = tf_transform_output.transform_raw_features(raw_features)

  # NOTE: the model is driven by transformed features (training ran on the
  # materialized TFT output), while slicing happens on raw features — so
  # expose both in the features dict.
  raw_features.update(transformed)

  return tfma.export.EvalInputReceiver(
      features=raw_features,
      # The receiver-tensor key name MUST be 'examples'.
      receiver_tensors={'examples': serialized_examples},
      labels=transformed[_transformed_name(_LABEL_KEY)])
def _input_fn(filenames, tf_transform_output, batch_size=200):
  """Generates features and labels for training or evaluation.

  Args:
    filenames: [str] list of CSV files to read data from.
    tf_transform_output: A TFTransformOutput.
    batch_size: int First dimension size of the Tensors returned by input_fn

  Returns:
    A (features, indices) tuple where features is a dictionary of
    Tensors, and indices is a single Tensor of label indices.
  """
  # Copy the spec so popping/mutation elsewhere cannot affect the cached one.
  feature_spec = dict(tf_transform_output.transformed_feature_spec())

  dataset = tf.data.experimental.make_batched_features_dataset(
      filenames, batch_size, feature_spec, reader=_gzip_reader_fn)
  features = dataset.make_one_shot_iterator().get_next()

  # Pop the label so it is not also fed to the model as a feature.
  labels = features.pop(_transformed_name(_LABEL_KEY))
  return features, labels
# TFX will call this function
def trainer_fn(hparams, schema):
  """Build the estimator using the high level API.

  Args:
    hparams: Holds hyperparameters used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  # DNN sizing: start at 100 nodes and shrink each of the 4 layers by a
  # factor of 0.7 (exponential decay), never dropping below 2 nodes.
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7
  hidden_units = [
      max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
      for i in range(num_dnn_layers)
  ]

  train_batch_size = 40
  eval_batch_size = 40

  tf_transform_output = tft.TFTransformOutput(hparams.transform_output)

  def train_input_fn():
    return _input_fn(
        hparams.train_files, tf_transform_output,
        batch_size=train_batch_size)

  def eval_input_fn():
    return _input_fn(
        hparams.eval_files, tf_transform_output, batch_size=eval_batch_size)

  train_spec = tf.estimator.TrainSpec(
      train_input_fn, max_steps=hparams.train_steps)

  def serving_receiver_fn():
    return _example_serving_receiver_fn(tf_transform_output, schema)

  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=hparams.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')

  # Checkpoint every 999 steps, keeping only the most recent checkpoint.
  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999,
      keep_checkpoint_max=1).replace(model_dir=hparams.serving_model_dir)

  estimator = _build_estimator(
      hidden_units=hidden_units,
      config=run_config,
      warm_start_from=hparams.warm_start_from)

  def receiver_fn():
    # Input receiver for TFMA processing.
    return _eval_input_receiver_fn(tf_transform_output, schema)

  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn,
  }
```
Now, we pass in this model code to the `Trainer` component and run it to train the model.
```
trainer = Trainer(
module_file=os.path.abspath(_taxi_trainer_module_file),
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
context.run(trainer)
```
#### Analyze Training with TensorBoard
Optionally, we can connect TensorBoard to the Trainer to analyze our model's training curves.
```
%%skip_for_export
# Get the URI of the output artifact representing the training logs, which is a directory
model_dir = trainer.outputs['output'].get()[0].uri
%load_ext tensorboard
%tensorboard --logdir {model_dir}
```
### Evaluator
The `Evaluator` component computes model performance metrics over the evaluation set. It uses the [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) library.
`Evaluator` will take as input the data from `ExampleGen`, the trained model from `Trainer`, and slicing configuration.
The slicing configuration allows you to slice your metrics on feature values (e.g. how does your model perform on taxi trips that start at 8am versus 8pm?). See an example of this configuration below:
```
# An empty slice spec means the overall slice, that is, the whole dataset.
OVERALL_SLICE_SPEC = evaluator_pb2.SingleSlicingSpec()
# Data can be sliced along a feature column
# In this case, data is sliced along feature column trip_start_hour.
FEATURE_COLUMN_SLICE_SPEC = evaluator_pb2.SingleSlicingSpec(
column_for_slicing=['trip_start_hour'])
ALL_SPECS = [
OVERALL_SLICE_SPEC,
FEATURE_COLUMN_SLICE_SPEC
]
```
Next, we give this configuration to `Evaluator` and run it.
```
# Use TFMA to compute evaluation statistics over features of a model.
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model_exports=trainer.outputs['model'],
feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(
specs=ALL_SPECS
))
context.run(evaluator)
```
After `Evaluator` finishes running, we can show the default visualization of global metrics on the entire evaluation set.
```
%%skip_for_export
context.show(evaluator.outputs['output'])
```
To see the visualization for sliced evaluation metrics, we can directly call the TensorFlow Model Analysis library.
```
%%skip_for_export
import tensorflow_model_analysis as tfma
# Get the TFMA output result path and load the result.
PATH_TO_RESULT = evaluator.outputs['output'].get()[0].uri
tfma_result = tfma.load_eval_result(PATH_TO_RESULT)
# Show data sliced along feature column trip_start_hour.
tfma.view.render_slicing_metrics(
tfma_result, slicing_column='trip_start_hour')
```
This visualization shows the same metrics, but computed at every feature value of `trip_start_hour` instead of on the entire evaluation set.
TensorFlow Model Analysis supports many other visualizations, such as Fairness Indicators and plotting a time series of model performance. To learn more, see [the tutorial](https://www.tensorflow.org/tfx/tutorials/model_analysis/tfma_basic).
### ModelValidator
The `ModelValidator` component validates that a newly trained model is better than the previous model. This is useful in a production pipeline setting where you may automatically train and validate a model every day.
In this notebook, we only train one model, so the ModelValidator automatically will label the model as "good".
```
model_validator = ModelValidator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'])
context.run(model_validator)
```
Let's examine the output artifacts of `ModelValidator`.
```
%%skip_for_export
model_validator.outputs
%%skip_for_export
blessing_uri = model_validator.outputs.blessing.get()[0].uri
!ls -l {blessing_uri}
```
### Pusher
The `Pusher` component is usually at the end of a TFX pipeline. It checks whether a model has passed validation, and if so, exports the model to `_serving_model_dir`.
```
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=model_validator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=_serving_model_dir)))
context.run(pusher)
```
Let's examine the output artifacts of `Pusher`.
```
%%skip_for_export
pusher.outputs
```
In particular, the Pusher will export your model in the SavedModel format, which looks like this:
```
%%skip_for_export
push_uri = pusher.outputs.model_push.get()[0].uri
latest_version = max(os.listdir(push_uri))
latest_version_path = os.path.join(push_uri, latest_version)
model = tf.saved_model.load(latest_version_path)
for item in model.signatures.items():
pp.pprint(item)
```
We've finished our tour of the built-in TFX components!
After you're happy with experimenting with TFX components and code in this notebook, you may want to export it as a pipeline to be orchestrated with Apache Airflow or Apache Beam. See the final section.
## Export to pipeline
To export the contents of this notebook as a pipeline to be orchestrated with Airflow or Beam, follow the instructions below.
If you're using Colab, make sure to **save this notebook to Google Drive** (`File` → `Save a Copy in Drive`) before exporting.
### 1. Mount Google Drive (Colab-only)
If you're using Colab, this notebook needs to mount your Google Drive to be able to access its own `.ipynb` file.
```
%%skip_for_export
#@markdown Run this cell and enter the authorization code to mount Google Drive.
import sys
if 'google.colab' in sys.modules:
# Colab.
from google.colab import drive
drive.mount('/content/drive')
```
### 2. Select an orchestrator
```
_runner_type = 'beam' #@param ["beam", "airflow"]
_pipeline_name = 'chicago_taxi_%s' % _runner_type
```
### 3. Set up paths for the pipeline
```
# For Colab notebooks only.
# TODO(USER): Fill out the path to this notebook.
_notebook_filepath = (
'/content/drive/My Drive/Colab Notebooks/taxi_pipeline_interactive.ipynb')
# For Jupyter notebooks only.
# _notebook_filepath = os.path.join(os.getcwd(),
# 'taxi_pipeline_interactive.ipynb')
# TODO(USER): Fill out the paths for the exported pipeline.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_serving_model_dir = os.path.join(_taxi_root, 'serving_model')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
```
### 4. Choose components to include in the pipeline
```
# TODO(USER): Specify components to be included in the exported pipeline.
components = [
example_gen, statistics_gen, schema_gen, example_validator, transform,
trainer, evaluator, model_validator, pusher
]
```
### 5. Generate pipeline files
```
%%skip_for_export
#@markdown Run this cell to generate the pipeline files.
if get_ipython().magics_manager.auto_magic:
print('Warning: %automagic is ON. Line magics specified without the % prefix '
'will not be scrubbed during export to pipeline.')
_pipeline_export_filepath = 'export_%s.py' % _pipeline_name
context.export_to_pipeline(notebook_filepath=_notebook_filepath,
export_filepath=_pipeline_export_filepath,
runner_type=_runner_type)
```
### 6. Download pipeline files
```
%%skip_for_export
#@markdown Run this cell to download the pipeline files as a `.zip`.
if 'google.colab' in sys.modules:
from google.colab import files
import zipfile
zip_export_path = os.path.join(
tempfile.mkdtemp(), 'export.zip')
with zipfile.ZipFile(zip_export_path, mode='w') as export_zip:
export_zip.write(_pipeline_export_filepath)
export_zip.write(_taxi_constants_module_file)
export_zip.write(_taxi_transform_module_file)
export_zip.write(_taxi_trainer_module_file)
files.download(zip_export_path)
```
To learn how to run the orchestrated pipeline with Apache Airflow, please refer to the [TFX Orchestration Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/airflow_workshop).
| github_jupyter |
```
import numpy as np
import xarray as xr
from matplotlib import pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (8,5)
from scipy.interpolate import *
x = np.linspace(0, 2*np.pi, 11)[0:-1]
y = np.sin(x)
# analytic derivative:
dydxa = np.cos(x)
# numpy derivatives:
dydx1 = np.diff(y)/np.diff(x)
np.shape(y),np.shape(dydx1)
# make a new grid for the derivative at half points: (numpy does not associate vars and coords)
h = x[1] - x[0]
x_half = x[0:-1] + h / 2
plt.plot(x,y,marker='o',label='y')
plt.plot(x,dydxa,marker='o',label='analytic dydx')
plt.plot(x[0:-1],dydx1,marker='o',label='numpy dydx - left points')
plt.plot(x[1:],dydx1,marker='o',label='numpy dydx - right points')
plt.plot(x_half,dydx1,marker='o',label='numpy dydx on half points')
plt.legend(loc='lower left')
# xarray derivatives:
da = xr.DataArray(y ,
dims=['x'],
coords={'x': x})
dydxDA = da.diff('x')/da.x.diff('x')
dydxDA.x
plt.plot(x,dydxa,marker='o',label='analytic dydx')
dydxDA.plot(marker='o',label='xarray?') # default coordinates?
plt.legend()
dy = da.diff('x').rolling(x=2).mean().shift(x=-1).dropna('x')
dx = da.x.diff('x').rolling(x=2).mean().shift(x=-1).dropna('x')
dydx = dy/dx
plt.plot(x,dydxa,marker='o',label='analytic dydx')
dydx.plot(marker='o',label='diff+interp+shift') # default coordinates?
plt.legend()
# What about boundary points? We could extend the whole DataArray:
da_extend = xr.concat([da[-1], da, da[0]],'x')
da_extend
# ... but the coordinate variable gets messed up - must separately extend the data and coordinate
y_extend = xr.concat([da[-1], da, da[0]],'x')
x_extend = xr.concat([da.x[0]-h, da.x, da.x[-1]+h],'x')
dy = y_extend.diff('x').rolling(x=2).mean().shift(x=-1).dropna('x')
dx = x_extend.diff('x').rolling(x=2).mean().shift(x=-1).dropna('x')
dydx_extended = dy/dx
plt.plot(x,dydxa,marker='o',label='analytic dydx')
dydx_extended.plot(marker='o',label='second order dydx')
plt.plot(x_half,dydx1,marker='o',label='numpy dydx')
plt.legend()
abs(dydxa-dydx_extended).max()
# Convergence study: compute the centered (second-order) derivative of sin(x)
# on periodic grids of increasing resolution and report the max error; for a
# second-order scheme the error should shrink ~4x each time the grid doubles.
for numpoints in [11,21,41,81,161]:
    # Drop the duplicated endpoint so the grid is truly periodic.
    x = np.linspace(0, 2*np.pi, numpoints)[0:-1]
    y = np.sin(x)
    dydx = np.cos(x)
    da = xr.DataArray(y , dims=['x'], coords={'x': x})
    h = x[1] - x[0]
    # Extend data and coordinate separately with one periodic ghost point on
    # each side so the derivative is defined at the boundaries too.
    y_extend = xr.concat([da[-1], da, da[0]],'x')
    x_extend = xr.concat([da.x[0]-h, da.x, da.x[-1]+h],'x')
    # diff gives forward differences at half points; rolling-mean + shift
    # averages adjacent pairs back onto the original grid (centered scheme).
    dy = y_extend.diff('x').rolling(x=2).mean().shift(x=-1).dropna('x')
    dx = x_extend.diff('x').rolling(x=2).mean().shift(x=-1).dropna('x')
    residual = dy/dx - dydx
    print(abs(residual).max())
    residual.plot(label=numpoints)
plt.legend()
```
| github_jupyter |
# MNIST Dataset & Database
In the [MNIST tutorial](https://github.com/caffe2/caffe2/blob/master/caffe2/python/tutorials/MNIST.ipynb) we use an lmdb database. You can also use leveldb or even minidb by changing the type reference when you get ready to read from the dbs. In this tutorial, we will go over how to download, extract, and generate lmdb and leveldb variants of the MNIST dataset.
## Dataset:
You can download the raw [MNIST dataset](https://download.caffe2.ai/datasets/mnist/mnist.zip), g/unzip the dataset and labels, and make the database yourself.
## Databases:
We provide a few database formats for you to try with the MNIST tutorial. The default is lmdb.
* [MNIST-nchw-lmdb](https://download.caffe2.ai/databases/mnist-lmdb.zip) - contains both the train and test lmdb MNIST databases in NCHW format
* [MNIST-nchw-leveldb](https://download.caffe2.ai/databases/mnist-leveldb.zip) - contains both the train and test leveldb MNIST databases in NCHW format
* [MNIST-nchw-minidb](https://download.caffe2.ai/databases/mnist-minidb.zip) - contains both the train and test minidb MNIST databases in NCHW format
## Tools:
### make_mnist_db
If you like LevelDB you can use Caffe2's `make_mnist_db` binary to generate leveldb databases. This binary is found in `/caffe2/build/caffe2/binaries/` or depending on your OS and installation, in `/usr/local/bin/`.
Here is an example call to `make_mnist_db`:
```
./make_mnist_db --channel_first --db leveldb --image_file ~/Downloads/train-images-idx3-ubyte --label_file ~/Downloads/train-labels-idx1-ubyte --output_file ~/caffe2/caffe2/python/tutorials/tutorial_data/mnist/mnist-train-nchw-leveldb
./make_mnist_db --channel_first --db leveldb --image_file ~/Downloads/t10k-images-idx3-ubyte --label_file ~/Downloads/t10k-labels-idx1-ubyte --output_file ~/caffe2/caffe2/python/tutorials/tutorial_data/mnist/mnist-test-nchw-leveldb
```
Note leveldb can get deadlocked if more than one user attempts to open the leveldb at the same time. This is why there is logic in the Python below to delete LOCK files if they're found.
### Python script
You can use the Python in the code blocks below to download and extract the dataset with `DownloadResource`, call the `make_mnist_db` binary, and generate your database with `GenerateDB`.
First, we will define our functions.
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
def DownloadResource(url, path):
    '''Downloads resources from s3 by url and unzips them to the provided path'''
    import requests, zipfile, io
    print("Downloading... {} to {}".format(url, path))
    r = requests.get(url, stream=True)
    # r.content is bytes, so wrap it in io.BytesIO. The original code used
    # StringIO.StringIO, which only exists on Python 2 and cannot hold the
    # binary zip payload on Python 3; BytesIO works on both.
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall(path)
    print("Completed download and extraction.")
def GenerateDB(image, label, name):
    '''Calls the make_mnist_db binary to generate a leveldb from a mnist dataset'''
    # NOTE(review): relies on the module-level `data_folder` variable being
    # defined before this function is called.
    db_path = os.path.join(data_folder, name)
    print('DB: ', db_path)
    if os.path.exists(db_path):
        print("Database exists already. Delete the folder if you have issues/corrupted DB, then rerun this.")
        # leveldb can deadlock when multiple users open the same db, so
        # remove any stale lock file left behind by a previous session.
        lock_path = os.path.join(db_path, "LOCK")
        if os.path.exists(lock_path):
            os.remove(lock_path)
    else:
        syscall = "/usr/local/bin/make_mnist_db --channel_first --db leveldb --image_file " + image + " --label_file " + label + " --output_file " + db_path
        os.system(syscall)
```
Now that we have our functions for loading, extracting, and generating our dbs, we will put these functions to use and generate the MNIST data in both lmdb and leveldb formats (if they do not already exist).
First, we **download and extract the MNIST dataset (train and test) in lmdb format** using:
```python
DownloadResource("http://download.caffe2.ai/databases/mnist-lmdb.zip", data_folder)
```
Next, we focus on **downloading, extracting, and generating MNIST train and test leveldbs**. We start by downloading and extracting the raw MNIST dataset (in ubyte format). This will ultimately extract four files, consisting of training images and labels, and testing images and labels.
```python
DownloadResource("http://download.caffe2.ai/datasets/mnist/mnist.zip", data_folder)
```
Finally, we **generate the leveldb train and test databases** (or regenerate; it can get locked with multi-user setups or abandoned threads). We do this by passing our `GenerateDB` function the names of the corresponding ubyte files along with an output file name.
```python
GenerateDB(image_file_train, label_file_train, "mnist-train-nchw-leveldb")
GenerateDB(image_file_test, label_file_test, "mnist-test-nchw-leveldb")
```
```
current_folder = os.path.join(os.path.expanduser('~'), 'caffe2_notebooks')
data_folder = os.path.join(current_folder, 'tutorial_data', 'mnist')
# If the data_folder does not already exist, create it
if not os.path.exists(data_folder):
os.makedirs(data_folder)
# Downloads and extracts the lmdb databases of MNIST images - both test and train
if not os.path.exists(os.path.join(data_folder,"mnist-train-nchw-lmdb")):
DownloadResource("http://download.caffe2.ai/databases/mnist-lmdb.zip", data_folder)
else:
print("mnist-lmdb already downloaded and extracted")
# Downloads and extracts the MNIST data set
if not os.path.exists(os.path.join(data_folder, "train-images-idx3-ubyte")):
DownloadResource("http://download.caffe2.ai/datasets/mnist/mnist.zip", data_folder)
else:
print("Raw mnist ubyte data already downloaded and extracted")
# (Re)generate the leveldb database (it can get locked with multi-user setups or abandoned threads)
# Requires the download of the dataset (mnist.zip) - see DownloadResource above.
# You also need to change references in the MNIST tutorial code where you train or test from lmdb to leveldb
image_file_train = os.path.join(data_folder, "train-images-idx3-ubyte")
label_file_train = os.path.join(data_folder, "train-labels-idx1-ubyte")
image_file_test = os.path.join(data_folder, "t10k-images-idx3-ubyte")
label_file_test = os.path.join(data_folder, "t10k-labels-idx1-ubyte")
GenerateDB(image_file_train, label_file_train, "mnist-train-nchw-leveldb")
GenerateDB(image_file_test, label_file_test, "mnist-test-nchw-leveldb")
```
## Code Changes for Other DBs
If you chose to use a format other than lmdb you will need to change a couple lines of code. When you use `ModelHelper` to instantiate the CNN, you pass in the `db` parameter with a path and the `db_type` with the type of db. You would need to update both of these values. Since you create two networks, one for training and one for testing, you would need to update the code for both of these.
**Default code using lmdb**
```python
train_model = model_helper.ModelHelper(name="mnist_train", arg_scope=arg_scope)
data, label = AddInput(
train_model, batch_size=64,
db=os.path.join(data_folder, 'mnist-train-nchw-lmdb'),
db_type='lmdb')
```
**Updated code using leveldb**
```python
train_model = model_helper.ModelHelper(name="mnist_train", arg_scope=arg_scope)
data, label = AddInput(
train_model, batch_size=64,
db=os.path.join(data_folder, 'mnist-train-nchw-leveldb'),
db_type='leveldb')
```
| github_jupyter |

# The Schrödinger Equation
>The underlying physical laws necessary for the mathematical theory of a large part of physics and the whole of chemistry are thus completely known, and the difficulty is only that the exact application of these laws leads to equations much too complicated to be soluble. It therefore becomes desirable that approximate practical methods of applying quantum mechanics should be developed, which can lead to an explanation of the main features of complex atomic systems without too much computation.
- Paul A. M. Dirac (1929)
## 🥅 Learning Objectives
- Review of Plane Wave(functions)
- "Derivation" of the Time-Dependent Schrödinger Equation (TDSE)
- The Hamiltonian
- The Time-Independent Schrödinger Equation (TISE)
- Separation of Variables Method
## Motivation for the Schrödinger Equation
Just like Newton’s equations or Maxwell’s equations, the Schrödinger equation is a law of nature: it cannot be “derived.” Instead, it must be postulated, and then tested by experiment. However, just as Newton’s equations can be justified insofar as they encapsulate the “laws of motion” formulated by Galileo and Newton, and Maxwell’s equations can be justified insofar as they encapsulate the “laws of electromagnetism” formulated by Gauss, Faraday, Coulomb, etc., Schrödinger's equation can be justified by observing that it encapsulates the “laws” of Planck and De Broglie, namely that
$$ E = h \nu = \hbar \omega $$
$$ p = \tfrac{h}{\lambda} = \tfrac{h \nu}{c} = \hbar k = h \tilde{\nu} $$
### Angular Frequency and Wavenumber
In these equations I have introduced several new symbols, mostly related to the fact it is often convenient to use angular frequency,
$$ \omega = 2 \pi \nu $$
which naturally pairs with
$$ \hbar = \tfrac{h}{2 \pi} $$
Similarly, it is often convenient, especially in spectroscopic studies, to use wavenumber,
$$ \tilde{\nu} = \tfrac{1}{\lambda} = \lambda^{-1} $$
or its angular analogue
$$ k = \tfrac{2 \pi}{\lambda} = 2 \pi \tilde{\nu} $$
Sometimes it is also useful to consider the period of the wave,
$$ \text{T} = \nu^{-1} $$
Recall that the wavelength, frequency, period, and wavenumber of a wave can be related to the speed of light by relations like:
$$ c = \lambda \nu = \tfrac{\lambda}{\text{T}} = \tilde{\nu} \nu $$
$$ c = \tfrac{\omega}{k} $$
#### 📝 Exercise: The photon emitted when a 4p electron in the Hydrogen atom deexcites into a 2s orbital has wavelength 486.1 nm. Compute its
- **frequency in Hz**
- **angular frequency in Hz**
- **wavenumber, $\tilde{\nu}$, in $\text{cm}^{-1}$.**
- **angular wavenumber, $k$, in $\text{m}^{-1}$.**
- **period in s.**
- **momentum in m kg/s.**
- **energy in Joules.**
[Answer](SchrodingerExercise1.ipynb)
> Reference: EMANIM — Interactive visualization of electromagnetic waves. Web application available at https://emanim.szialab.org
### The Wavefunction
The use of angular frequency and wave number is especially useful when dealing with waves, because the need to explicitly include factors of $2 \pi$ to describe the periodic oscillations is removed. A one-dimensional plane wave, moving in the $x$ direction, with amplitude $A$, wavelength $\lambda = \tfrac{2 \pi}{k}$, and frequency $\nu = \tfrac{\omega}{2 \pi}$ is described by
$$ \Psi(x,t) = A e^{i (kx - \omega t)} = A\left(\cos(kx-\omega t) + i \sin(kx - \omega t) \right) $$
Since $\Psi(x,t)$ is the function that describes a wave, we call it a "wavefunction."
### Expression for the Total Energy
Based on electromagnetism, we believe this equation should give us the essential insight we need to describe the "waviness of particles." We need, however, a second equation that will allow us to describe the "particulateness of waves." Since the energy of a wave is proportional to $\omega$ (Planck-Einstein relation) and the momentum of a wave is proportional to $k$ (De Broglie relation), an equation that relates the energy to the momentum is an obvious candidate. For example,
$$
\begin{align}
\text{energy} &= \text{kinetic energy} + \text{potential energy}\\
E &= T + V(x,t)\\
E &= \frac{p^2}{2m} + V(x,t)
\end{align}
$$
### The Time-Dependent Schrödinger Equation (TDSE)
We need to find a way to combine the key equations from the previous two sections. To do this, note that
$$ \frac{\partial \Psi(x,t)}{\partial t} = (-i \omega) A e^{i (kx - \omega t)} = (-i \omega) \Psi(x,t) $$
We can link this to Planck's equation for the energy of a photon by multiplying both sides by $i \hbar$,
$$
\begin{align}
i \hbar \frac{\partial \Psi(x,t)}{\partial t} &= (i \hbar) (-i \omega) \Psi(x,t) = \hbar \omega \Psi(x,t) \\
&= E \Psi(x,t)
\end{align}
$$
Similarly, differentiating with respect to position,
$$
\begin{align}
\frac{\partial \Psi(x,t)}{\partial x} &= (i k) A e^{i (kx - \omega t)} \\
\frac{\partial^2 \Psi(x,t)}{\partial x^2} &= (i k)^2 A e^{i (kx - \omega t)} = -k^2 \Psi(x,t)
\end{align}
$$
Multiplying both sides by $-\hbar^2$ provides a link to the De Broglie relation,
$$
-\hbar^2 \frac{\partial^2 \Psi(x,t)}{\partial x^2} = (\hbar k)^2 \Psi(x,t) = p^2 \Psi(x,t)
$$
Now, let's consider our expression for the energy,
$$
\begin{align}
E &= \frac{p^2}{2m} + V(x,t) \\
E \Psi(x,t) &= \frac{p^2}{2m} \Psi(x,t) + V(x,t)\Psi(x,t)
\end{align}
$$
Substituting in the equations for the energy and the momentum-squared in terms of wavefunction derivatives, we have:
$$ i \hbar \frac{\partial \Psi(x,t)}{\partial t} = -\frac{\hbar^2}{2m}\frac{\partial^2 \Psi(x,t)}{\partial x^2} + V(x,t)\Psi(x,t) $$
This is the [*time-dependent Schrödinger equation*](https://en.wikipedia.org/wiki/Schr%C3%B6dinger_equation#Time-dependent_equation), which we often abbreviate as the TDSE.
The biggest limitations of the Schrödinger equation is that it was derived using the nonrelativistic expression for the kinetic energy of a particle, $T = \tfrac{p^2}{2m}$. For this reason, the Schrödinger equation is not valid for particles that are moving at a substantial fraction of the speed of light (where relativistic effects are important) or for light itself. Quantum mechanics can be extended to such cases, and relativistic quantum mechanics is quite important for the heavier atoms in the periodic table. (Typically relativity is neglected for atoms up to Zinc (Z=30) or Krypton (Z=36), and the included past that. However there are a few molecules containing lighter elements—e.g., the Sulfur dimer—where there are chemically (or at least spectroscopically) interesting relativistic effects.) The full relativistic treatment of quantum mechanics leads to “quantum electrodynamics,” which is a rich and interesting subject that we, alas, will not have time to cover.
#### 📝 Exercise: Show that the time-dependent Schrödinger equation for the complex conjugate of the wavefunction, $\Psi^*$, is:
$$
-i \hbar \frac{d \Psi^*(x,t)}{dt} = - \frac{\hbar^2}{2m} \frac{d^2 \Psi^*(x,t)}{dx^2} + V(x,t)\Psi^*(x,t)
$$
#### 📝 Exercise: What is the complex conjugate of
$$
\Psi(x,t) = A e^{(a+bi)(kx - \omega t)}
$$
[Answer](SchrodingerExercise2.ipynb)
### The Hamiltonian
An equation like Schrödinger’s may seem mysterious to us now, but at the time it was derived it seemed very natural. A top-rate physicist like Schrödinger had a rich background in classical physics, and he would have understood that there were many results, from classical physics, that suggested that the equations for radiation and for matter could be combined. One of the strongest such suggestions came from two principles: “Fermat’s principle of least time” and the “Hamilton-Lagrange principle of least action.” Fermat’s principle says that a ray of light goes from point A to point B in the fastest possible way, and can be used to derive all of classical optics. The Hamilton-Lagrange principle says that a particle goes from point A to point B in a way that makes minimizes its action (the product of its momentum and its change in position) along the path. Schrödinger would have noticed that Planck’s constant had units of action, and then quickly ascertained that laws of quantum mechanics might be governed by an equation similar to the Hamilton-Jacobi equation for the action,
$$\frac{1}{2m}\left(\frac{\partial S}{\partial x}\right)^2 + V(x,t) = \frac{\partial S}{\partial t} $$
or, equivalently,
$$ S \left(-\frac{1}{2m} \frac{\partial^2S}{\partial x^2} + V(x,t) \right) = \frac{\partial S}{\partial t} - \frac{1}{2m}\frac{\partial^2S^2}{\partial x^2}$$
The second term on the right-hand-side of this equation ordinarily integrates to zero (owing to the divergence theorem).
Similarly, as hinted at in the previous set of notes, the Schrödinger equation can be deduced from the (quantum-mechanical) principle of least (really stationary) action. Indeed, the stationary-action principle is more fundamental than the Schrödinger equation, and is the basis for quantum electrodynamics, which is the relativistic theory for matter and electromagnetic radiation that undergirds classical mechanics and classical electromagnetism (in the $\hbar \rightarrow 0$ limit) and the Schrödinger equation (for nonrelativistic matter, without concern for how matter and radiation interact). Feynman and Schwinger won the Nobel Prize for quantum electrodynamics, but its origins go back to Schrödinger's and Dirac's observations about the links between the (least) action principle and the Schrödinger equation.
The Schrödinger equation is usually written in the form:
$$ {\color{blue}\left( -\frac{\hbar^2}{2m} \frac{\partial^2}{\partial x^2}+V(x,t)\right)} \Psi(x,t) = i \hbar \frac{\partial \Psi(x,t)}{\partial t} $$
The term in <span style="color:blue">blue</span> is the [Hamiltonian](https://en.wikipedia.org/wiki/Hamiltonian_(quantum_mechanics)) operator, $\hat{H}(x,t)$; it is the operator for the energy in quantum mechanics. In classical mechanics, the [Hamiltonian](https://en.wikipedia.org/wiki/Hamiltonian_mechanics) is exactly the equation that we started with when we wrote a (classical) expression for the energy of matter, $H(x,t) = \tfrac{p^2}{2m} + V(x,t)$.
Using the Hamiltonian, we can then write the TDSE in a compact form,
$$ {\color{blue}\hat{H}(x,t)}\Psi(x,t) = i \hbar \frac{\partial \Psi(x,t)}{\partial t} $$
As in classical mechanics, the Hamiltonian is the operator for the energy. In quantum mechanics, every observable quantity corresponds to an operator. For example, the kinetic energy operator is
$$ \hat{T} = -\frac{\hbar^2}{2m} \frac{\partial^2}{\partial x^2} $$
Moreover, the values of the properties of any quantum-mechanical operator correspond to its eigenvalues,
$$ (\text{operator}) \Psi = (\text{eigenvalue}) \Psi $$
This suggests that there should be another equation with the form
$$ \hat{H}\Psi = E \Psi $$
This equation is called the [time-independent Schrödinger equation](https://en.wikipedia.org/wiki/Schr%C3%B6dinger_equation#Time-independent_equation) (TISE).
My father, an organic chemist, always says he knew that quantum mechanics would be problematic the first time he saw the Schrödinger equation. He still refers to $\Psi$ as "Satan's pitchfork" and $\hat{H}$ as the "hell operator." Most people, however, call $\hat{H}$ the Hamiltonian and $\Psi$ the wavefunction.
#### 📝 Exercise: Ground-State Energy of the Morse Potential
The Morse potential is often used as an approximate model for the vibrations of diatomic molecules. In convenient units where $\frac{\hslash ^{2}}{2m}=1$, the time-independent Schrödinger equation for a Morse oscillator can be written as:
$$
\left(-\frac{d^{2}}{dx^{2}}+\lambda ^{2}\left(e^{-2x}-2e^{-x}\right)\right)\psi _{n}\left(x\right)=E_{n}\psi _{n}\left(x\right)
$$
The first two eigenfunctions of the Morse oscillator are given by the following expressions (which are not normalized)
$$
\begin{align}
\psi _{0}\left(x\right)&=\exp \left(-\left(\lambda -\tfrac{1}{2}\right)x-\lambda e^{-x}\right)\\
\psi _{1}\left(x\right)&=\exp \left(-\left(\lambda -\tfrac{3}{2}\right)x-\lambda e^{-x}\right)\left(2\lambda -2-2\lambda e^{-x}\right)
\end{align}
$$
**What is the expression for the ground-state energy for the Morse oscillator?**
[Answer](SchrodingerExercise3.md)
### The Time-Independent Schrödinger Equation (TISE)
In much of chemistry, the Hamiltonian operator is not time-dependent. For example, the electrons in a molecule feel a fixed (time-independent) attraction to the nuclei in the molecule and, to a first approximation, the (very small) motion of the nuclei can be neglected. Similarly, in the absence of a time-varying external field, a molecule as a whole (containing both electrons and nuclei) does not feel a time-varying potential.
To derive the time-independent Schrödinger equation, we insert the time-independent Hamiltonian,
$$\hat{H}(x) = -\frac{\hbar^2}{2m} \frac{\partial^2}{\partial x^2} +V(x)$$
into the time-dependent Schrödinger equation,
$$ \hat{H}(x)\Psi(x,t) = i \hbar \frac{\partial \Psi(x,t)}{\partial t} $$
and then rearrange the equation into the form
$$ \left( \hat{H}(x) - i\hbar \frac{\partial}{\partial t} \right) \Psi(x,t) = 0 $$
This is a differential equation that can be solved by the general technique of [separation of variables](https://en.wikipedia.org/wiki/Separation_of_variables). Specifically,
> [**Separation of Variables**](https://en.wikipedia.org/wiki/Separation_of_variables#Partial_differential_equations) Given an operator that is a sum of two operators that depend on different (sets of) variables,
$$ \hat{A}(x,y) = \hat{A}_x(x) + \hat{A}_y(y) $$
The solution to the eigenvalue equation
$$ \hat{A}(x,y) \Psi(x,y) = a \Psi(x,y) $$
is
$$\Psi(x,y) = \Psi_x(x) \cdot \Psi_y(y) $$
$$ a = a_x + a_y $$
where
$$\hat{A}_x(x) \Psi_x(x) = a_x \Psi_x(x) $$
$$\hat{A}_y(y) \Psi_y(y) = a_y \Psi_y(y) $$
This tells us that for a time-independent Hamiltonian, the wavefunction has the form $\Psi(x,t) = \psi(x) \phi(t)$, where
$$\hat{H}(x) \psi(x) = E \psi(x) $$
$$- i\hbar \frac{\partial}{\partial t} \phi(t) = -E\phi(t) $$
In the first equation, we have chosen the eigenvalue of the TDSE to be the energy since we know that the Hamiltonian is the quantum-mechanical operator for the energy. The second equation is a separable differential equation, which can be solved as follows:
$$
\begin{align}
-i\hbar \frac{\partial}{\partial t} \phi(t) &= -E\phi(t) \\
\frac{1}{\phi} d\phi &= -\frac{i}{\hbar}E dt \\
\int \frac{1}{\phi} d\phi &= \int \frac{-i}{\hbar}E dt \\
\ln \phi(t) &= \frac{-iEt}{\hbar} + \text{constant of integration}\\
\phi(t) &\propto e^{\frac{-iEt}{\hbar}}
\end{align}
$$
Therefore,
$$ \Psi(x,t) = \psi(x) \phi(t) \propto \psi(x) e^{\frac{-iEt}{\hbar}} $$
The constant of proportionality is not important here, because the time-dependent and time-independent Schrödinger equations are satisfied even if $\Psi(x)$ and $\phi(t)$ are multiplied by constants.
## Summary
When solving the time-independent Schrödinger equation, we need to find all the possible values of E that solve this equation. There are almost always very many different values of E that work and, for this reason, we usually label the solutions to the equation, which are the eigenfunctions (eigenvectors) and eigenvalues of the Hamiltonian operator, accordingly:
$$ \hat{H}(x)\psi_k(x) = E_k \psi_k(x) $$
This course is largely about the “art” of solving the TISE.
Sometimes in chemistry we cannot use the time-independent Schrödinger equation, and need to use the time-dependent Schrödinger equation instead. This is particularly important in spectroscopy, where we want to understand how a molecule responds to a time-dependent external electromagnetic field.
## 🪞 Self-Reflection
The Hamiltonian operator enters the Schrödinger equation in a special way, and in this sense is a "more important" operator than other quantum-mechanical operators. Why do you think this is the case?
## ❓ Knowledge Tests
- Questions about the Schrodinger Equation. [GitHub Classroom Link](https://classroom.github.com/a/yXuh6NJt)
## 🤔 Thought-Provoking Questions
- Once upon a time, one of the professors at McMaster was asked "why is the quantum-mechanical momentum operator associated with the derivative" and, unable to answer the student, sent the student to me for clarification. Can you justify why the quantum mechanical momentum operator is $\hat{p} = -i \hbar \nabla$?
- Once upon a time, a person interviewing for a job as a quantum chemist was asked how it was possible that the kinetic energy of a quantum system was always positive, given that the quantum-mechanical operator for the kinetic energy was "the negative of something squared", namely, $\hat{T} = \tfrac{-1}{2m}\nabla^2$. How would you answer this question?
- What are the eigenfunctions of the momentum operator?
- What are the eigenfunctions of the kinetic-energy operator?
- Is every eigenfunction of the momentum operator also an eigenfunction of the kinetic-energy operator? Is the [converse](https://en.wikipedia.org/wiki/Converse_(logic)) also true?
- Suppose that one has an electron that is tethered to the origin by a spring, so that the force the electrons feels towards the origin, $x=0$, increases proportionally to its distance, $F = -kx$. What would the Hamiltonian be?
- What are the eigenfunctions of the kinetic-energy operator?
- What are the eigenfunctions of the momentum operator?
- Show, mathematically, that the eigenvectors and eigenvalues of a separable operator, $\hat{A}(x,y) = \hat{A}(x) + \hat{A}(y)$ are given by the above expressions.
## 🔁 Recapitulation
- Write the quantum-mechanical operator for the energy, kinetic-energy, potential energy, and momentum.
- Write the time-independent and time-dependent Schrödinger equations.
- What is the form of the time-dependent wavefunction that solves the TDSE when the Hamiltonian is time-independent.
## 🔮 Next Up...
- Study a particle confined to a box with infinite sides.
- Explore the postulates of quantum mechanics.
## 📚 References
My favorite sources for this material are:
- R. Eisberg and R. Resnick, Quantum Physics of Atoms, Molecules, Solids, Nuclei, and Particles (Wiley, New York, 1974)
- R. Dumont, [An Emergent Reality, Part 2: Quantum Mechanics](https://github.com/PaulWAyers/IntroQChem/blob/main/documents/DumontBook.pdf?raw=true) (Chapters 1 and 2).
- Also see my (pdf) class [notes](https://github.com/PaulWAyers/IntroQChem/blob/main/documents/IntroQM.pdf?raw=true).
- [Davit Potoyan's](https://www.chem.iastate.edu/people/davit-potoyan) Jupyter-book course "derives" the Schrodinger equation in a somewhat different way. [Chapter 3](https://dpotoyan.github.io/Chem324/Lec3-0.html) is especially relevant here.
There are also some excellent wikipedia articles:
- [Plane Wave](https://en.wikipedia.org/wiki/Sinusoidal_plane_wave)
- [Wavelength-Frequency Relation](https://en.wikipedia.org/wiki/Wavelength)
- Schrödinger Equation
- [Schrödinger Equation](http://en.wikipedia.org/wiki/Schrodinger_equation)
- [Theoretical Justification of S.E.](http://en.wikipedia.org/wiki/Theoretical_and_experimental_justification_for_the_Schr%C3%B6dinger_equation).
- [Momentum Operator](http://en.wikipedia.org/wiki/Momentum_operator)
| github_jupyter |
# Machine Learning Foundation
## Section 2, Part d: Regularization and Gradient Descent
## Introduction
We will begin with a short tutorial on regression, polynomial features, and regularization based on a very simple, sparse data set that contains a column of `x` data and associated `y` noisy data. The data file is called `X_Y_Sinusoid_Data.csv`.
```
import os
data_path = ['data']
```
## Question 1
* Import the data.
* Also generate approximately 100 equally spaced x data points over the range of 0 to 1. Using these points, calculate the y-data which represents the "ground truth" (the real function) from the equation: $y = sin(2\pi x)$
* Plot the sparse data (`x` vs `y`) and the calculated ("real") data.
```
import pandas as pd
import numpy as np
filepath = os.sep.join(data_path + ['X_Y_Sinusoid_Data.csv'])
data = pd.read_csv(filepath)
X_real = np.linspace(0, 1.0, 100)
Y_real = np.sin(2 * np.pi * X_real)
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set_style('white')
sns.set_context('talk')
sns.set_palette('dark')
# Plot of the noisy (sparse)
ax = data.set_index('x')['y'].plot(ls='', marker='o', label='data')
ax.plot(X_real, Y_real, ls='--', marker='', label='real function')
ax.legend()
ax.set(xlabel='x data', ylabel='y data');
```
## Question 2
* Using the `PolynomialFeatures` class from Scikit-learn's preprocessing library, create 20th order polynomial features.
* Fit this data using linear regression.
* Plot the resulting predicted value compared to the calculated data.
Note that `PolynomialFeatures` requires either a dataframe (with one column, not a Series) or a 2D array of dimension (`X`, 1), where `X` is the length.
```
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
# Setup the polynomial features
degree = 20
pf = PolynomialFeatures(degree)
lr = LinearRegression()
# Extract the X- and Y- data from the dataframe
X_data = data[['x']]
Y_data = data['y']
# Create the features and fit the model
X_poly = pf.fit_transform(X_data)
lr = lr.fit(X_poly, Y_data)
Y_pred = lr.predict(X_poly)
# Plot the result
plt.plot(X_data, Y_data, marker='o', ls='', label='data', alpha=1)
plt.plot(X_real, Y_real, ls='--', label='real function')
plt.plot(X_data, Y_pred, marker='^', alpha=.5, label='predictions w/ polynomial features')
plt.legend()
ax = plt.gca()
ax.set(xlabel='x data', ylabel='y data');
```
## Question 3
* Perform the regression on using the data with polynomial features using ridge regression ($\alpha$=0.001) and lasso regression ($\alpha$=0.0001).
* Plot the results, as was done in Question 1.
* Also plot the magnitude of the coefficients obtained from these regressions, and compare them to those obtained from linear regression in the previous question. The linear regression coefficients will likely need a separate plot (or their own y-axis) due to their large magnitude.
What does the comparatively large magnitude of the un-regularized linear-regression coefficients tell you about the role of regularization?
```
# Mute the sklearn warning about regularization
import warnings
warnings.filterwarnings('ignore', module='sklearn')
from sklearn.linear_model import Ridge, Lasso
# The ridge regression model
rr = Ridge(alpha=0.001)
rr = rr.fit(X_poly, Y_data)
Y_pred_rr = rr.predict(X_poly)
# The lasso regression model
lassor = Lasso(alpha=0.0001)
lassor = lassor.fit(X_poly, Y_data)
Y_pred_lr = lassor.predict(X_poly)
# The plot of the predicted values
plt.plot(X_data, Y_data, marker='o', ls='', label='data')
plt.plot(X_real, Y_real, ls='--', label='real function')
plt.plot(X_data, Y_pred, label='linear regression', marker='^', alpha=.5)
plt.plot(X_data, Y_pred_rr, label='ridge regression', marker='^', alpha=.5)
plt.plot(X_data, Y_pred_lr, label='lasso regression', marker='^', alpha=.5)
plt.legend()
ax = plt.gca()
ax.set(xlabel='x data', ylabel='y data');
# let's look at the absolute value of coefficients for each model
coefficients = pd.DataFrame()
coefficients['linear regression'] = lr.coef_.ravel()
coefficients['ridge regression'] = rr.coef_.ravel()
coefficients['lasso regression'] = lassor.coef_.ravel()
coefficients = coefficients.applymap(abs)
coefficients.describe() # Huge difference in scale between non-regularized vs regularized regression
colors = sns.color_palette()
# Setup the dual y-axes
ax1 = plt.axes()
ax2 = ax1.twinx()
# Plot the linear regression data
ax1.plot(lr.coef_.ravel(),
color=colors[0], marker='o', label='linear regression')
# Plot the regularization data sets
ax2.plot(rr.coef_.ravel(),
color=colors[1], marker='o', label='ridge regression')
ax2.plot(lassor.coef_.ravel(),
color=colors[2], marker='o', label='lasso regression')
# Customize axes scales
ax1.set_ylim(-2e14, 2e14)
ax2.set_ylim(-25, 25)
# Combine the legends
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax1.legend(h1+h2, l1+l2)
ax1.set(xlabel='coefficients',ylabel='linear regression')
ax2.set(ylabel='ridge and lasso regression')
ax1.set_xticks(range(len(lr.coef_)));
```
## Question 4
For the remaining questions, we will be working with the [data set](https://www.kaggle.com/c/house-prices-advanced-regression-techniques) from last lesson, which is based on housing prices in Ames, Iowa. There are an extensive number of features--see the exercises from week three for a discussion of these features.
To begin:
* Import the data with Pandas, remove any null values, and one hot encode categoricals. Either Scikit-learn's feature encoders or Pandas `get_dummies` method can be used.
* Split the data into train and test sets.
* Log transform skewed features.
* Scaling can be attempted, although it can be interesting to see how well regularization works without scaling features.
```
filepath = os.sep.join(data_path + ['Ames_Housing_Sales.csv'])
data = pd.read_csv(filepath, sep=',')
```
Create a list of categorial data and one-hot encode. Pandas one-hot encoder (`get_dummies`) works well with data that is defined as a categorical.
```
# Get a pd.Series consisting of all the string categoricals.
# NOTE: the alias np.object was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `object` performs the identical dtype comparison.
one_hot_encode_cols = data.dtypes[data.dtypes == object]  # filtering by string categoricals
one_hot_encode_cols = one_hot_encode_cols.index.tolist()  # list of categorical fields

# Here we see another way of one-hot-encoding:
# Encode these columns as categoricals so one hot encoding works on split data (if desired)
for col in one_hot_encode_cols:
    data[col] = pd.Categorical(data[col])

# Do the one hot encoding
data = pd.get_dummies(data, columns=one_hot_encode_cols)
```
Next, split the data in train and test data sets.
```
from sklearn.model_selection import train_test_split
train, test = train_test_split(data, test_size=0.3, random_state=42)
```
There are a number of columns that have skewed features--a log transformation can be applied to them. Note that this includes the `SalePrice`, our prediction target. However, let's keep that one as is.
```
# Create a list of float columns to check for skewing.
# NOTE: the alias np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `float` selects the same (float64) columns.
mask = data.dtypes == float
float_cols = data.columns[mask]

skew_limit = 0.75  # threshold above which a column is treated as skewed
skew_vals = train[float_cols].skew()

# Columns whose skew exceeds the limit, most-skewed first
skew_cols = (skew_vals
             .sort_values(ascending=False)
             .to_frame()
             .rename(columns={0:'Skew'})
             .query('abs(Skew) > {0}'.format(skew_limit)))

skew_cols
```
Transform all the columns where the skew is greater than 0.75, excluding "SalePrice".
```
# OPTIONAL: Let's look at what happens to one of these features, when we apply np.log1p visually.
field = "BsmtFinSF1"
fig, (ax_before, ax_after) = plt.subplots(1, 2, figsize=(10, 5))
train[field].hist(ax=ax_before)
train[field].apply(np.log1p).hist(ax=ax_after)
ax_before.set(title='before np.log1p', ylabel='frequency', xlabel='value')
ax_after.set(title='after np.log1p', ylabel='frequency', xlabel='value')
fig.suptitle('Field "{}"'.format(field));
# a little bit better
# Mute the setting wtih a copy warnings
pd.options.mode.chained_assignment = None
for col in skew_cols.index.tolist():
if col == "SalePrice":
continue
train[col] = np.log1p(train[col])
test[col] = test[col].apply(np.log1p) # same thing
```
Separate features from predictor.
```
feature_cols = [x for x in train.columns if x != 'SalePrice']
X_train = train[feature_cols]
y_train = train['SalePrice']
X_test = test[feature_cols]
y_test = test['SalePrice']
```
## Question 5
* Write a function **`rmse`** that takes in truth and prediction values and returns the root-mean-squared error. Use sklearn's `mean_squared_error`.
```
from sklearn.metrics import mean_squared_error
def rmse(ytrue, ypredicted):
    """Root-mean-squared error between true and predicted values."""
    return mean_squared_error(ytrue, ypredicted) ** 0.5
```
* Fit a basic linear regression model
* print the root-mean-squared error for this model
* plot the predicted vs actual sale price based on the model.
```
from sklearn.linear_model import LinearRegression
linearRegression = LinearRegression().fit(X_train, y_train)
linearRegression_rmse = rmse(y_test, linearRegression.predict(X_test))
print(linearRegression_rmse)
f = plt.figure(figsize=(6,6))
ax = plt.axes()
ax.plot(y_test, linearRegression.predict(X_test),
marker='o', ls='', ms=3.0)
lim = (0, y_test.max())
ax.set(xlabel='Actual Price',
ylabel='Predicted Price',
xlim=lim,
ylim=lim,
title='Linear Regression Results');
```
## Question 6
Ridge regression uses L2 normalization to reduce the magnitude of the coefficients. This can be helpful in situations where there is high variance. The regularization functions in Scikit-learn each contain versions that have cross-validation built in.
* Fit a regular (non-cross validated) Ridge model to a range of $\alpha$ values and plot the RMSE using the cross validated error function you created above.
* Use $$[0.005, 0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 80]$$ as the range of alphas.
* Then repeat the fitting of the Ridge models using the range of $\alpha$ values from the prior section. Compare the results.
Now for the `RidgeCV` method. It's not possible to get the alpha values for the models that weren't selected, unfortunately. The resulting error values and $\alpha$ values are very similar to those obtained above.
```
from sklearn.linear_model import RidgeCV
alphas = [0.005, 0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 80]
ridgeCV = RidgeCV(alphas=alphas,
cv=4).fit(X_train, y_train)
ridgeCV_rmse = rmse(y_test, ridgeCV.predict(X_test))
print(ridgeCV.alpha_, ridgeCV_rmse)
```
## Question 7
Much like the `RidgeCV` function, there is also a `LassoCV` function that uses an L1 regularization function and cross-validation. L1 regularization will selectively shrink some coefficients, effectively performing feature elimination.
The `LassoCV` function does not allow the scoring function to be set. However, the custom error function (`rmse`) created above can be used to evaluate the error on the final model.
Similarly, there is also an elastic net function with cross validation, `ElasticNetCV`, which is a combination of L2 and L1 regularization.
* Fit a Lasso model using cross validation and determine the optimum value for $\alpha$ and the RMSE using the function created above. Note that the magnitude of $\alpha$ may be different from the Ridge model.
* Repeat this with the Elastic net model.
* Compare the results via table and/or plot.
Use the following alphas:
`[1e-5, 5e-5, 0.0001, 0.0005]`
```
from sklearn.linear_model import LassoCV
alphas2 = np.array([1e-5, 5e-5, 0.0001, 0.0005])
lassoCV = LassoCV(alphas=alphas2,
max_iter=5e4,
cv=3).fit(X_train, y_train)
lassoCV_rmse = rmse(y_test, lassoCV.predict(X_test))
print(lassoCV.alpha_, lassoCV_rmse) # Lasso is slower
```
We can determine how many of these features remain non-zero.
```
print('Of {} coefficients, {} are non-zero with Lasso.'.format(len(lassoCV.coef_),
len(lassoCV.coef_.nonzero()[0])))
```
Now try the elastic net, with the same alphas as in Lasso, and l1_ratios between 0.1 and 0.9
```
from sklearn.linear_model import ElasticNetCV
l1_ratios = np.linspace(0.1, 0.9, 9)
elasticNetCV = ElasticNetCV(alphas=alphas2,
l1_ratio=l1_ratios,
max_iter=1e4).fit(X_train, y_train)
elasticNetCV_rmse = rmse(y_test, elasticNetCV.predict(X_test))
print(elasticNetCV.alpha_, elasticNetCV.l1_ratio_, elasticNetCV_rmse)
```
Comparing the RMSE calculation from all models is easiest in a table.
```
# Collect the RMSE of each model into a single comparison table.
rmse_vals = [linearRegression_rmse, ridgeCV_rmse, lassoCV_rmse, elasticNetCV_rmse]
labels = ['Linear', 'Ridge', 'Lasso', 'ElasticNet']

rmse_df = pd.Series(rmse_vals, index=labels).to_frame()
# `inplace` expects a boolean; the original passed the int 1
rmse_df.rename(columns={0: 'RMSE'}, inplace=True)
rmse_df
```
We can also make a plot of actual vs predicted housing prices as before.
```
f = plt.figure(figsize=(6,6))
ax = plt.axes()
labels = ['Ridge', 'Lasso', 'ElasticNet']
models = [ridgeCV, lassoCV, elasticNetCV]
for mod, lab in zip(models, labels):
ax.plot(y_test, mod.predict(X_test),
marker='o', ls='', ms=3.0, label=lab)
leg = plt.legend(frameon=True)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(1.0)
ax.set(xlabel='Actual Price',
ylabel='Predicted Price',
title='Linear Regression Results');
```
## Question 8
Let's explore Stochastic gradient descent in this exercise.
Recall that Linear models in general are sensitive to scaling.
However, SGD is *very* sensitive to scaling.
Moreover, a high value of learning rate can cause the algorithm to diverge, whereas a too low value may take too long to converge.
* Fit a stochastic gradient descent model without a regularization penalty (the relevant parameter is `penalty`).
* Now fit stochastic gradient descent models with each of the three penalties (L2, L1, Elastic Net) using the parameter values determined by cross validation above.
* Do not scale the data before fitting the model.
* Compare the results to those obtained without using stochastic gradient descent.
```
# Import SGDRegressor and prepare the parameters
from sklearn.linear_model import SGDRegressor

model_parameters_dict = {
    'Linear': {'penalty': 'none'},
    # Lasso corresponds to L1 regularization (the original swapped l1/l2)
    'Lasso': {'penalty': 'l1',
              'alpha': lassoCV.alpha_},
    # Ridge corresponds to L2 regularization; use the CV-selected alpha,
    # not the ridge RMSE value (the original passed ridgeCV_rmse here)
    'Ridge': {'penalty': 'l2',
              'alpha': ridgeCV.alpha_},
    'ElasticNet': {'penalty': 'elasticnet',
                   'alpha': elasticNetCV.alpha_,
                   'l1_ratio': elasticNetCV.l1_ratio_}
}

new_rmses = {}
for modellabel, parameters in model_parameters_dict.items():
    # following notation passes the dict items as arguments
    SGD = SGDRegressor(**parameters)
    SGD.fit(X_train, y_train)
    new_rmses[modellabel] = rmse(y_test, SGD.predict(X_test))

rmse_df['RMSE-SGD'] = pd.Series(new_rmses)
rmse_df
```
Notice how high the error values are! The algorithm is diverging. This can be due to scaling and/or learning rate being too high. Let's adjust the learning rate and see what happens.
* Pass in `eta0=1e-7` when creating the instance of `SGDRegressor`.
* Re-compute the errors for all the penalties and compare.
```
# Import SGDRegressor and prepare the parameters
from sklearn.linear_model import SGDRegressor

model_parameters_dict = {
    'Linear': {'penalty': 'none'},
    # Lasso corresponds to L1 regularization (the original swapped l1/l2)
    'Lasso': {'penalty': 'l1',
              'alpha': lassoCV.alpha_},
    # Ridge corresponds to L2 regularization; use the CV-selected alpha,
    # not the ridge RMSE value (the original passed ridgeCV_rmse here)
    'Ridge': {'penalty': 'l2',
              'alpha': ridgeCV.alpha_},
    'ElasticNet': {'penalty': 'elasticnet',
                   'alpha': elasticNetCV.alpha_,
                   'l1_ratio': elasticNetCV.l1_ratio_}
}

new_rmses = {}
for modellabel, parameters in model_parameters_dict.items():
    # following notation passes the dict items as arguments;
    # a tiny eta0 tames the divergence seen without scaling
    SGD = SGDRegressor(eta0=1e-7, **parameters)
    SGD.fit(X_train, y_train)
    new_rmses[modellabel] = rmse(y_test, SGD.predict(X_test))

rmse_df['RMSE-SGD-learningrate'] = pd.Series(new_rmses)
rmse_df
```
Now let's scale our training data and try again.
* Fit a `MinMaxScaler` to `X_train` create a variable `X_train_scaled`.
* Using the scaler, transform `X_test` and create a variable `X_test_scaled`.
* Apply the same versions of SGD to them and compare the results. Don't pass in a eta0 this time.
```
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
new_rmses = {}
for modellabel, parameters in model_parameters_dict.items():
# following notation passes the dict items as arguments
SGD = SGDRegressor(**parameters)
SGD.fit(X_train_scaled, y_train)
new_rmses[modellabel] = rmse(y_test, SGD.predict(X_test_scaled))
rmse_df['RMSE-SGD-scaled'] = pd.Series(new_rmses)
rmse_df
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
new_rmses = {}
for modellabel, parameters in model_parameters_dict.items():
# following notation passes the dict items as arguments
SGD = SGDRegressor(**parameters)
SGD.fit(X_train_scaled, y_train)
new_rmses[modellabel] = rmse(y_test, SGD.predict(X_test_scaled))
rmse_df['RMSE-SGD-scaled'] = pd.Series(new_rmses)
rmse_df
```
---
### Machine Learning Foundation (C) 2020 IBM Corporation
| github_jupyter |
## Please input your directory for the top level folder
folder name : SUBMISSION MODEL
```
dir_ = 'INPUT-PROJECT-DIRECTORY/submission_model/' # input only here
```
#### setting other directory
```
raw_data_dir = dir_+'2. data/'
processed_data_dir = dir_+'2. data/processed/'
log_dir = dir_+'4. logs/'
model_dir = dir_+'5. models/'
####################################################################################
####################### 1-2. recursive model by store & cat ########################
####################################################################################
ver, KKK = 'priv', 0
STORES = ['CA_1', 'CA_2', 'CA_3', 'CA_4', 'TX_1', 'TX_2', 'TX_3', 'WI_1', 'WI_2', 'WI_3']
CATS = ['HOBBIES','HOUSEHOLD', 'FOODS']
# General imports
import numpy as np
import pandas as pd
import os, sys, gc, time, warnings, pickle, psutil, random
# custom imports
from multiprocessing import Pool
warnings.filterwarnings('ignore')
########################### Helpers
#################################################################################
def seed_everything(seed=0):
    """Seed Python's and NumPy's global RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
## Multiprocess Runs
def df_parallelize_run(func, t_split):
    """Apply `func` to each element of `t_split` in a process pool and
    concatenate the per-element DataFrames column-wise.

    Uses at most N_CORES workers (fewer if t_split is shorter).
    The pool is now closed/joined in a finally block so worker processes
    are not leaked if `func` raises inside `pool.map` (the original left
    the pool open on error).
    """
    num_cores = np.min([N_CORES, len(t_split)])
    pool = Pool(num_cores)
    try:
        df = pd.concat(pool.map(func, t_split), axis=1)
    finally:
        pool.close()
        pool.join()
    return df
########################### Helper to load data by store ID
#################################################################################
# Read data
def get_data_by_store(store, dept):
    """Assemble the modeling frame for one (store_id, cat_id) pair.

    Column-concatenates the base grid with price and calendar pickles
    (aligned by index), restricts rows to the training window and to the
    requested store/category, then attaches mean-encoding and lag
    features by index membership.

    Parameters
    ----------
    store : str
        Value matched against the 'store_id' column (e.g. 'CA_1').
    dept : str
        Value matched against the 'cat_id' column (e.g. 'FOODS').

    Returns
    -------
    (pandas.DataFrame, list of str)
        The frame ordered as ['id', 'd', TARGET] + features, and the
        list of feature column names.
    """
    # The first 2 columns of PRICE/CALENDAR are skipped — presumably key
    # columns duplicated in BASE; TODO confirm against the pickle builders.
    df = pd.concat([pd.read_pickle(BASE),
                    pd.read_pickle(PRICE).iloc[:,2:],
                    pd.read_pickle(CALENDAR).iloc[:,2:]],
                   axis=1)

    # Keep only the training window and this store/category slice.
    df = df[df['d']>=START_TRAIN]
    df = df[(df['store_id']==store) & (df['cat_id']==dept)]

    # Mean-encoded features, restricted to the surviving row index.
    df2 = pd.read_pickle(MEAN_ENC)[mean_features]
    df2 = df2[df2.index.isin(df.index)]

    # Lag features; the first 3 columns are skipped (presumably keys —
    # TODO confirm), restricted to the surviving row index.
    df3 = pd.read_pickle(LAGS).iloc[:,3:]
    df3 = df3[df3.index.isin(df.index)]

    df = pd.concat([df, df2], axis=1)
    del df2  # release memory early; these frames are large
    df = pd.concat([df, df3], axis=1)
    del df3

    # Everything not explicitly excluded is a model feature.
    features = [col for col in list(df) if col not in remove_features]
    df = df[['id','d',TARGET]+features]
    df = df.reset_index(drop=True)
    return df, features
# Recombine Test set after training
def get_base_test():
    """Recombine the per-store / per-category test pickles into one frame.

    Reads processed_data_dir + 'test_<store>_<cat>.pkl' for every
    STORES x CATS pair, tags each chunk with its store_id and cat_id,
    and returns a single row-concatenated DataFrame with a fresh
    0..n-1 index.

    The original concatenated inside the loop (quadratic copying); the
    chunks are now collected in a list and concatenated once.
    """
    parts = []
    for store_id in STORES:
        for state_id in CATS:
            temp_df = pd.read_pickle(processed_data_dir+'test_'+store_id+'_'+state_id+'.pkl')
            temp_df['store_id'] = store_id
            temp_df['cat_id'] = state_id
            parts.append(temp_df)
    return pd.concat(parts, ignore_index=True)
########################### Helper to make dynamic rolling lags
#################################################################################
def make_lag(LAG_DAY):
    """Build one shifted-sales lag column from the global base_test."""
    col_name = 'sales_lag_' + str(LAG_DAY)
    lag_df = base_test[['id', 'd', TARGET]]
    shifted = lag_df.groupby(['id'])[TARGET].transform(lambda s: s.shift(LAG_DAY))
    lag_df[col_name] = shifted.astype(np.float16)
    return lag_df[[col_name]]
def make_lag_roll(LAG_DAY):
    """Rolling-mean feature for base_test: shift by LAG_DAY[0], then
    average over a window of LAG_DAY[1] days, per id."""
    shift_day, roll_wind = LAG_DAY[0], LAG_DAY[1]
    col_name = 'rolling_mean_tmp_' + str(shift_day) + '_' + str(roll_wind)
    lag_df = base_test[['id', 'd', TARGET]]
    rolled = lag_df.groupby(['id'])[TARGET].transform(
        lambda s: s.shift(shift_day).rolling(roll_wind).mean())
    lag_df[col_name] = rolled
    return lag_df[[col_name]]
########################### Model params
#################################################################################
import lightgbm as lgb
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'tweedie',
'tweedie_variance_power': 1.1,
'metric': 'rmse',
'subsample': 0.5,
'subsample_freq': 1,
'learning_rate': 0.015,
'num_leaves': 2**8-1,
'min_data_in_leaf': 2**8-1,
'feature_fraction': 0.5,
'max_bin': 100,
'n_estimators': 3000,
'boost_from_average': False,
'verbose': -1
}
########################### Vars
#################################################################################
VER = 1
SEED = 42
seed_everything(SEED)
lgb_params['seed'] = SEED
N_CORES = psutil.cpu_count()
#LIMITS and const
TARGET = 'sales'
START_TRAIN = 700
END_TRAIN = 1941 - 28*KKK
P_HORIZON = 28
USE_AUX = False
remove_features = ['id','cat_id', 'state_id','store_id',
'date','wm_yr_wk','d',TARGET]
mean_features = ['enc_store_id_dept_id_mean','enc_store_id_dept_id_std',
'enc_item_id_store_id_mean','enc_item_id_store_id_std']
ORIGINAL = raw_data_dir
BASE = processed_data_dir+'grid_part_1.pkl'
PRICE = processed_data_dir+'grid_part_2.pkl'
CALENDAR = processed_data_dir+'grid_part_3.pkl'
LAGS = processed_data_dir+'lags_df_28.pkl'
MEAN_ENC = processed_data_dir+'mean_encoding_df.pkl'
SHIFT_DAY = 28
N_LAGS = 15
LAGS_SPLIT = [col for col in range(SHIFT_DAY,SHIFT_DAY+N_LAGS)]
ROLS_SPLIT = []
for i in [1,7,14]:
for j in [7,14,30,60]:
ROLS_SPLIT.append([i,j])
########################### Train Models
#################################################################################
from lightgbm import LGBMRegressor
from gluonts.model.rotbaum._model import QRX
for store_id in STORES:
for state_id in CATS:
print('Train', store_id, state_id)
grid_df, features_columns = get_data_by_store(store_id, state_id)
train_mask = grid_df['d']<=END_TRAIN
valid_mask = train_mask&(grid_df['d']>(END_TRAIN-P_HORIZON))
preds_mask = (grid_df['d']>(END_TRAIN-100)) & (grid_df['d'] <= END_TRAIN+P_HORIZON)
# train_data = lgb.Dataset(grid_df[train_mask][features_columns],
# label=grid_df[train_mask][TARGET])
# valid_data = lgb.Dataset(grid_df[valid_mask][features_columns],
# label=grid_df[valid_mask][TARGET])
seed_everything(SEED)
estimator = QRX(model=LGBMRegressor(**lgb_params),#lgb_wrapper(**lgb_params),
min_bin_size=200)
estimator.fit(
grid_df[train_mask][features_columns],
grid_df[train_mask][TARGET],
max_sample_size=1000000,
seed=SEED,
eval_set=(
grid_df[valid_mask][features_columns],
grid_df[valid_mask][TARGET]
),
verbose=100,
x_train_is_dataframe=True
)
# estimator = lgb.train(lgb_params,
# train_data,
# valid_sets = [valid_data],
# verbose_eval = 100
#
# )
# display(pd.DataFrame({'name':estimator.feature_name(),
# 'imp':estimator.feature_importance()}).sort_values('imp',ascending=False).head(25))
grid_df = grid_df[preds_mask].reset_index(drop=True)
keep_cols = [col for col in list(grid_df) if '_tmp_' not in col]
grid_df = grid_df[keep_cols]
d_sales = grid_df[['d','sales']]
substitute = d_sales['sales'].values
substitute[(d_sales['d'] > END_TRAIN)] = np.nan
grid_df['sales'] = substitute
grid_df.to_pickle(processed_data_dir+'test_'+store_id+'_'+state_id+'.pkl')
model_name = model_dir+'lgb_model_'+store_id+'_'+state_id+'_v'+str(VER)+'.bin'
pickle.dump(estimator, open(model_name, 'wb'))
del grid_df, d_sales, substitute, estimator#, train_data, valid_data
gc.collect()
MODEL_FEATURES = features_columns
```
| github_jupyter |
```
# Example: resampling a bi-hourly chemical-concentration series to daily means.
from __future__ import print_function
import os
import pandas as pd
import numpy as np
%matplotlib inline
from matplotlib import pyplot as plt

# Read the bi-hourly chemical concentration readings into a pandas DataFrame.
df = pd.read_csv('datasets/chemical-concentration-readings.csv')

# Shape of the dataset (rows, columns).
print('Shape of the dataset:', df.shape)

# First 10 rows of the DataFrame.
df.head(10)

# The observations are taken at 2-hour intervals; parse the Timestamp column
# into a datetime64 row index so time-based resampling works.
datetime_rowid = df['Timestamp'].map(lambda t: pd.to_datetime(t, format='%Y-%m-%d %H:%M:%S'))
df.index = datetime_rowid
df.head(10)

# Resample to calendar days and compute the daily mean concentration.
daily = df['Chemical conc.'].resample('D')
daily_mean = daily.mean()

# Plot the original series (blue) and the daily mean (red) on shared axes.
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(1,1,1)
df['Chemical conc.'].plot(ax=ax, color='b')
daily_mean.plot(ax=ax, color='r')
ax.set_title('Bi-hourly reading (blue) & Daily Mean (red)')
ax.set_xlabel('Days in Jan 1975')
ax.set_ylabel('Chemical concentration')
plt.savefig('plots/ch2/B07887_02_02.png', format='png', dpi=300)
"""
Let us shown an example of grouping by a period
"""
#Load the DataFrame and re-index the row to datetime64
df = pd.read_csv('datasets/mean-daily-temperature-fisher-river.csv')
df.index = df['Date'].map(lambda d: pd.to_datetime(d, format = '%Y-%m-%d'))
#Display shape of the DataFrame
print('Shape of dataframe:', df.shape)
#Let's see first 10 rows
df.head(10)
#Plot original time series on daily mean temparature
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(1,1,1)
df['Mean temparature'].plot(ax=ax, color='b')
ax.set_title('Mean daily temparature')
plt.savefig('plots/ch2/B07887_02_03.png', format='png', dpi=300)
#We need to groupby the data for every month and find aggregate statistics
#Let's start by adding a Month_Year column
df['Month_Year'] = df.index.map(lambda d: d.strftime('%m-%Y'))
df.head(10)
#Calculate month wise statistics
monthly_stats = df.groupby(by='Month_Year')['Mean temparature'].aggregate([np.mean, np.median,
np.std
])
monthly_stats.reset_index(inplace=True)
monthly_stats.head(10)
#Let's create month and year columns and sort by them to reorder the rows
monthly_stats['Year'] = monthly_stats['Month_Year']\
.map(lambda m: pd.to_datetime(m, format='%m-%Y').strftime('%Y'))
monthly_stats['Month'] = monthly_stats['Month_Year']\
.map(lambda m: pd.to_datetime(m, format='%m-%Y').strftime('%m'))
monthly_stats.sort_values(by=['Year', 'Month'], inplace=True)
monthly_stats.head(10)
#Let's set the Month_Year as the row index
monthly_stats.index = monthly_stats['Month_Year']
#Plot original time series and daily mean
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(1,1,1)
monthly_stats['mean'].plot(ax=ax, color='b')
monthly_stats['std'].plot(ax=ax, color='r')
ax.set_title('Monthly statistics: Mean (blue) & Std. Dev. (red)')
plt.savefig('plots/ch2/B07887_02_04.png', format='png', dpi=300)
#Now we will calculate weekly moving average on the original time series of mean daily temparature
weekly_moving_average = df['Mean temparature'].rolling(7).mean()
#Now we will calculate monthly moving average on the original time series of mean daily temparature
monthly_moving_average = df['Mean temparature'].rolling(30).mean()
#Let's caluclate the weekly and monthly avergaes with a stride of length 2
weekly_moving_average_2stride = df['Mean temparature'].rolling(7).mean()[::2]
monthly_moving_average_2stride = df['Mean temparature'].rolling(30).mean()[::2]
#Plot original time series and weekly moving average
fig, axarr = plt.subplots(3, sharex=True)
fig.set_size_inches(5.5, 5,5)
df['Mean temparature'].plot(ax=axarr[0], color='b')
axarr[0].set_title('Daily mean temparature')
weekly_moving_average.plot(ax=axarr[1], color='r')
axarr[1].set_title('Weekly moving average')
monthly_moving_average.plot(ax=axarr[2], color='g')
axarr[2].set_title('Monthly moving average')
plt.savefig('plots/ch2/B07887_02_05.png', format='png', dpi=300)
```
| github_jupyter |
```
import sys
sys.path.append(r"D:\work\nlp")
from fennlp.datas import dataloader
import tensorflow as tf
from fennlp.datas.checkpoint import LoadCheckpoint
from fennlp.datas.dataloader import TFWriter, TFLoader
from fennlp.metrics import Metric
from fennlp.metrics.crf import CrfLogLikelihood
from fennlp.models import bert
from fennlp.optimizers import optim
from fennlp.tools import bert_init_weights_from_checkpoint
# Load metadata for a pretrained Chinese BERT (chinese_L-12_H-768_A-12)
# checkpoint from a local directory (is_download=False -> no network fetch).
load_check = LoadCheckpoint(language='zh', is_download=False, file_path=r"D:\work\nlp\tests\NER\NER_ZH\chinese_L-12_H-768_A-12")
param, vocab_file, model_path = load_check.load_bert_param()
# Override the sequence length and batch size for NER fine-tuning.
param.maxlen = 64
param.batch_size = 16
class BERT_NER(tf.keras.Model):
    """BERT encoder + dense projection + CRF head for Chinese NER.

    call() expects a single stacked tensor containing
    [input_ids, token_type_ids, input_mask, labels] and returns the CRF
    negative log-likelihood loss together with the Viterbi-decoded tags.
    """

    def __init__(self, param, **kwargs):
        super(BERT_NER, self).__init__(**kwargs)
        self.batch_size = param.batch_size
        self.maxlen = param.maxlen
        self.label_size = param.label_size
        self.bert = bert.BERT(param)
        # Projects each token embedding to per-label scores (CRF emissions).
        self.dense = tf.keras.layers.Dense(self.label_size, activation="relu")
        self.crf = CrfLogLikelihood()

    def call(self, inputs, is_training=True):
        # Split the stacked input into its four components.
        input_ids, token_type_ids, input_mask, Y = tf.split(inputs, 4, 0)
        input_ids = tf.cast(tf.squeeze(input_ids, axis=0), tf.int64)
        token_type_ids = tf.cast(tf.squeeze(token_type_ids, axis=0), tf.int64)
        input_mask = tf.cast(tf.squeeze(input_mask, axis=0), tf.int64)
        Y = tf.cast(tf.squeeze(Y, axis=0), tf.int64)
        # Build the model: BERT contextual embeddings -> dense emissions.
        bert = self.bert([input_ids, token_type_ids, input_mask], is_training)
        sequence_output = bert.get_sequence_output()  # batch,sequence,768
        predict = self.dense(sequence_output)
        predict = tf.reshape(predict, [self.batch_size, self.maxlen, -1])
        # CRF loss: mean negative log-likelihood over the batch; sequence
        # length of each example is its count of non-padding tokens.
        log_likelihood, transition = self.crf(predict, Y, sequence_lengths=tf.reduce_sum(input_mask, 1))
        loss = tf.math.reduce_mean(-log_likelihood)
        predict, viterbi_score = self.crf.crf_decode(predict, transition,
                                                     sequence_length=tf.reduce_sum(input_mask, 1))
        return loss, predict

    def predict(self, inputs, is_training=False):
        # NOTE(review): shadows tf.keras.Model.predict with a different
        # signature; callers must pass the same stacked tensor as call().
        loss, predict = self(inputs, is_training)
        return predict
# Serialize the "test" split to TFRecords (check_exist=False forces a
# rewrite even if the records already exist), then build a loader over
# the same directory.
writer = TFWriter(param.maxlen, vocab_file, modes=["test"], input_dir="ner_data", output_dir="ner_data", check_exist=False)
loader = TFLoader(param.maxlen, param.batch_size, input_dir="ner_data")
ds = loader.load_test()
# Peek at the first batch: token ids, segment ids, attention mask, labels.
X, token_type_id, input_mask, Y = ds.__iter__().next()
[X, token_type_id, input_mask, Y]
```
| github_jupyter |
<img src="https://upload.wikimedia.org/wikipedia/commons/4/47/Logo_UTFSM.png" width="200" alt="utfsm-logo" align="left"/>
# MAT281
### Aplicaciones de la Matemática en la Ingeniería
## Módulo 03
## Laboratorio Clase 02: Visualización Imperativa
### Instrucciones
* Completa tus datos personales (nombre y rol USM) en siguiente celda.
* La escala es de 0 a 4 considerando solo valores enteros.
* Debes _pushear_ tus cambios a tu repositorio personal del curso.
* Como respaldo, debes enviar un archivo .zip con el siguiente formato `mXX_cYY_lab_apellido_nombre.zip` a alonso.ogueda@gmail.com, debe contener todo lo necesario para que se ejecute correctamente cada celda, ya sea datos, imágenes, scripts, etc.
* Se evaluará:
- Soluciones
- Código
- Que Binder esté bien configurado.
- Al presionar `Kernel -> Restart Kernel and Run All Cells` deben ejecutarse todas las celdas sin error.
* __La entrega es al final de esta clase.__
__Nombre__:
__Rol__:
## Datos: _European Union lesbian, gay, bisexual and transgender survey (2012)_
Link a los datos [aquí](https://www.kaggle.com/ruslankl/european-union-lgbt-survey-2012).
### Contexto
La FRA (Agencia de Derechos Fundamentales) realizó una encuesta en línea para identificar cómo las personas lesbianas, gays, bisexuales y transgénero (LGBT) que viven en la Unión Europea y Croacia experimentan el cumplimiento de sus derechos fundamentales. La evidencia producida por la encuesta apoyará el desarrollo de leyes y políticas más efectivas para combatir la discriminación, la violencia y el acoso, mejorando la igualdad de trato en toda la sociedad. La necesidad de una encuesta de este tipo en toda la UE se hizo evidente después de la publicación en 2009 del primer informe de la FRA sobre la homofobia y la discriminación por motivos de orientación sexual o identidad de género, que destacó la ausencia de datos comparables. La Comisión Europea solicitó a FRA que recopilara datos comparables en toda la UE sobre este tema. FRA organizó la recopilación de datos en forma de una encuesta en línea que abarca todos los Estados miembros de la UE y Croacia. Los encuestados eran personas mayores de 18 años, que se identifican como lesbianas, homosexuales, bisexuales o transgénero, de forma anónima. La encuesta se hizo disponible en línea, de abril a julio de 2012, en los 23 idiomas oficiales de la UE (excepto irlandés) más catalán, croata, luxemburgués, ruso y turco. En total, 93,079 personas LGBT completaron la encuesta. Los expertos internos de FRA diseñaron la encuesta que fue implementada por Gallup, uno de los líderes del mercado en encuestas a gran escala. Además, organizaciones de la sociedad civil como ILGA-Europa (Región Europea de la Asociación Internacional de Lesbianas, Gays, Bisexuales, Trans e Intersexuales) y Transgender Europe (TGEU) brindaron asesoramiento sobre cómo acercarse mejor a las personas LGBT.
Puede encontrar más información sobre la metodología de la encuesta en el [__Informe técnico de la encuesta LGBT de la UE. Metodología, encuesta en línea, cuestionario y muestra__](https://fra.europa.eu/sites/default/files/eu-lgbt-survey-technical-report_en.pdf).
### Contenido
El conjunto de datos consta de 5 archivos .csv que representan 5 bloques de preguntas: vida cotidiana, discriminación, violencia y acoso, conciencia de los derechos, preguntas específicas de personas transgénero.
El esquema de todas las tablas es idéntico:
* `CountryCode` - name of the country
* `subset` - Lesbian, Gay, Bisexual women, Bisexual men or Transgender (for Transgender Specific Questions table the value is only Transgender)
* `question_code` - unique code ID for the question
* `question_label` - full question text
* `answer` - answer given
* `percentage`
* `notes` - [0]: small sample size; [1]: NA due to small sample size; [2]: missing value
En el laboratorio de hoy solo utilizaremos los relacionados a la vida cotidiana, disponibles en el archivo `LGBT_Survey_DailyLife.csv` dentro de la carpeta `data`.
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Load the "daily life" block of the 2012 EU LGBT survey and inspect it.
daily_life_raw = pd.read_csv(os.path.join("data", "LGBT_Survey_DailyLife.csv"))
daily_life_raw.head()
daily_life_raw.info()
daily_life_raw.describe(include="all").T

# Build a Series mapping each question_code to its full question text
# (one entry per unique code), then print them all.
questions = (
    daily_life_raw.loc[: , ["question_code", "question_label"]]
    .drop_duplicates()
    .set_index("question_code")
    .squeeze()
)
for idx, value in questions.items():
    print(f"Question code {idx}:\n\n{value}\n\n")
```
### Preprocesamiento de datos
¿Te fijaste que la columna `percentage` no es numérica? Eso es por los registros con notes `[1]`, por lo que los eliminaremos.
```
# Distinct values of the notes column (' [1] ' marks NA from small samples).
daily_life_raw.notes.unique()

# Clean frame: drop rows whose percentage is NA (notes == ' [1] ' -- note
# the surrounding spaces present in the raw data), cast percentage to int,
# drop unused columns, and shorten CountryCode to country.
daily_life = (
    daily_life_raw.query("notes != ' [1] '")
    .astype({"percentage": "int"})
    .drop(columns=["question_label", "notes"])
    .rename(columns={"CountryCode": "country"})
)
daily_life.head()
```
## Ejercicio 1 (1 pto)
¿A qué tipo de dato (nominal, ordinal, discreto, continuo) corresponde cada columna del DataFrame `daily_life`?
__Respuesta:__
* `country`:
* `subset`:
* `question_code`:
* `answer`:
* `percentage`:
## Ejercicio 2 (1 pto)
Crea un nuevo dataframe `df1` tal que sean solo registros de Bélgica, la pregunta con código `b1_b` y que hayan respondido _Very widespread_.
Ahora, crea un gráfico de barras vertical con la función `bar` de `matplotlib` para mostrar el porcentaje de respuestas por cada grupo. La figura debe ser de tamaño 10 x 6 y el color de las barras verde.
```
print(f"Question b1_b:\n\n{questions['b1_b']}")
df1 = daily_life.query("country == #FIXME and question_code == #FIXME and answer == #FIXME")
df1
x = # FIX ME
y = # FIX ME
fig = plt.figure(figsize=(10, 6))
plt.# FIX ME
plt.show()
```
## Ejercicio 3 (1 pto)
Respecto a la pregunta con código `g5`, ¿Cuál es el porcentage promedio por cada valor de la respuesta (notar que la respuestas a las preguntas son numéricas)?
```
print(f"Question g5:\n\n{questions['g5']}")
```
Crea un DataFrame llamado `df2` tal que:
1. Solo sean registros con la pregunta con código `g5`
2. Cambia el tipo de la columna `answer` a `int`.
3. Agrupa por país y respuesta y calcula el promedio a la columna porcentaje (usa `agg`).
4. Resetea los índices.
```
df2 = (
daily_life.query("question_code == #FIXME")
.astype({"answer": # FIX ME})
.groupby(# FIX ME)
.agg(# FIX ME)
.# FIX ME
)
df2
```
Crea un DataFrame llamado `df2_mean` tal que:
1. Agrupa `df2` por respuesta y calcula el promedio del porcentaje.
2. Resetea los índices.
```
df2_mean = df2.groupby(# FIX ME)# FIX ME ...
df2_mean.head()
```
Ahora, grafica lo siguiente:
1. Una figura con dos columnas, tamaño de figura 15 x 12 y que compartan eje x y eje y. Usar `plt.subplots`.
2. Para el primer _Axe_ (`ax1`), haz un _scatter plot_ tal que el eje x sea los valores de respuestas de `df2`, y el eje y corresponda a los porcentajes de `df2`. Recuerda que en este caso corresponde a promedios por país, por lo que habrá más de 10 puntos en el gráfico.
3. Para el segundo _Axe_ (`ax2`), haz un gráfico de barras horizontal tal que el eje x sea los valores de respuestas de `df2_mean`, y el eje y corresponda a los porcentajes de `df2_mean`.
```
x = # FIX ME
y = # FIX ME
x_mean = # FIX ME
y_mean = # FIX ME
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(15, 12), sharex=True, sharey=True)
ax1.# FIX ME(# FIX ME, # FIX ME)
ax1.grid(alpha=0.3)
ax2.# FIX ME(# FIX ME, # FIX ME)
ax2.grid(alpha=0.3)
fig.show()
```
## Ejercicio 4 (1 pto)
Respecto a la misma pregunta `g5`, cómo se distribuyen los porcentajes en promedio para cada país - grupo?
Utilizaremos el mapa de calor presentado en la clase, para ello es necesario procesar un poco los datos para conformar los elementos que se necesitan.
Crea un DataFrame llamado `df3` tal que:
1. Solo sean registros con la pregunta con código `g5`
2. Cambia el tipo de la columna `answer` a `int`.
3. Agrupa por país y subset, luego calcula el promedio a la columna porcentaje (usa `agg`).
4. Resetea los índices.
5. Pivotea tal que los índices sean los países, las columnas los grupos y los valores el promedio de porcentajes.
6. Llena los valores nulos con cero. Usa `fillna`.
```
from mpl_heatmap import heatmap, annotate_heatmap
df3 = (
daily_life.query("question_code == #FIXME")
.astype(# FIX ME)
.groupby(# FIX ME)
.agg(# FIX ME)
.reset_index()
.pivot(index=# FIX ME, columns=# FIX ME, values=# FIX ME)
.fillna(# FIX ME)
)
df3.head()
```
Finalmente, los ingredientes para el heat map son:
```
countries = df3.index.tolist()
subsets = df3.columns.tolist()
answers = df3.values
```
El mapa de calor debe ser de la siguiente manera:
* Tamaño figura: 15 x 20
* cmap = "YlGn"
* cbarlabel = "Porcentaje promedio (%)"
* Precición en las anotaciones: Flotante con dos decimales.
```
fig, ax = plt.subplots(figsize=(15, 20))
im, cbar = heatmap(# FIX ME, # FIX ME, # FIX ME , ax=ax,
cmap=# FIX ME, cbarlabel=# FIX ME)
texts = annotate_heatmap(im, valfmt="{x:.2f}")
fig.tight_layout()
plt.show()
```
| github_jupyter |
# Basic training functionality
```
from fastai.basic_train import *
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
from fastai.distributed import *
```
[`basic_train`](/basic_train.html#basic_train) wraps together the data (in a [`DataBunch`](/basic_data.html#DataBunch) object) with a pytorch model to define a [`Learner`](/basic_train.html#Learner) object. This is where the basic training loop is defined for the [`fit`](/basic_train.html#fit) function. The [`Learner`](/basic_train.html#Learner) object is the entry point of most of the [`Callback`](/callback.html#Callback) functions that will customize this training loop in different ways (and made available through the [`train`](/train.html#train) module), notably:
- [`Learner.lr_find`](/train.html#lr_find) will launch an LR range test that will help you select a good learning rate
- [`Learner.fit_one_cycle`](/train.html#fit_one_cycle) will launch a training using the 1cycle policy, to help you train your model fast.
- [`Learner.to_fp16`](/train.html#to_fp16) will convert your model in half precision and help you launch a training in mixed precision.
```
show_doc(Learner, title_level=2)
```
The main purpose of [`Learner`](/basic_train.html#Learner) is to train `model` using [`Learner.fit`](/basic_train.html#Learner.fit). After every epoch, all *metrics* will be printed, and will also be available to callbacks.
The default weight decay will be `wd`, which will be handled using the method from [Fixing Weight Decay Regularization in Adam](https://arxiv.org/abs/1711.05101) if `true_wd` is set (otherwise it's L2 regularization). If `bn_wd` is False then weight decay will be removed from batchnorm layers, as recommended in [Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour](https://arxiv.org/abs/1706.02677). You can ensure that batchnorm layer learnable params are trained even for frozen layer groups, by enabling `train_bn`.
To use [discriminative layer training](#Discriminative-layer-training) pass an [`nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) for each layer group to be optimized with different settings.
Any model files created will be saved in `path`/`model_dir`.
You can pass a list of [`callbacks`](/callbacks.html#callbacks) that you have already created, or (more commonly) simply pass a list of callback functions to `callback_fns` and each function will be called (passing `self`) on object initialization, with the results stored as callback objects. For a walk-through, see the [training overview](/training.html) page. You may also want to use an `application` to fit your model, e.g. using the [`create_cnn`](/vision.learner.html#create_cnn) method:
```
# Quick end-to-end example: download the MNIST sample, build a DataBunch
# from its folder layout, create a resnet18 learner and fit one epoch.
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learn = create_cnn(data, models.resnet18, metrics=accuracy)
learn.fit(1)
```
### Model fitting methods
```
show_doc(Learner.fit)
```
Uses [discriminative layer training](#Discriminative-layer-training) if multiple learning rates or weight decay values are passed. To control training behaviour, use the [`callback`](/callback.html#callback) system or one or more of the pre-defined [`callbacks`](/callbacks.html#callbacks).
```
show_doc(Learner.fit_one_cycle)
```
Uses the [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler) callback.
```
show_doc(Learner.lr_find)
```
Runs the learning rate finder defined in [`LRFinder`](/callbacks.lr_finder.html#LRFinder), as discussed in [Cyclical Learning Rates for Training Neural Networks](https://arxiv.org/abs/1506.01186).
### See results
```
show_doc(Learner.get_preds)
show_doc(Learner.validate)
show_doc(Learner.show_results)
show_doc(Learner.predict)
show_doc(Learner.pred_batch)
show_doc(Learner.interpret, full_name='interpret')
jekyll_note('This function only works in the vision application.')
```
### Model summary
```
show_doc(Learner.summary)
```
### Test time augmentation
```
show_doc(Learner.TTA, full_name = 'TTA')
```
Applies Test Time Augmentation to `learn` on the dataset `ds_type`. We take the average of our regular predictions (with a weight `beta`) with the average of predictions obtained through augmented versions of the training set (with a weight `1-beta`). The transforms decided for the training set are applied with a few changes `scale` controls the scale for zoom (which isn't random), the cropping isn't random but we make sure to get the four corners of the image. Flipping isn't random but applied once on each of those corner images (so that makes 8 augmented versions total).
### Gradient clipping
```
show_doc(Learner.clip_grad)
```
### Mixed precision training
```
show_doc(Learner.to_fp16)
```
Uses the [`MixedPrecision`](/callbacks.fp16.html#MixedPrecision) callback to train in mixed precision (i.e. forward and backward passes using fp16, with weight updates using fp32), using all [NVIDIA recommendations](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) for ensuring speed and accuracy.
```
show_doc(Learner.to_fp32)
```
### Distributed training
```
show_doc(Learner.distributed, full_name='distributed')
```
### Discriminative layer training
When fitting a model you can pass a list of learning rates (and/or weight decay amounts), which will apply a different rate to each *layer group* (i.e. the parameters of each module in `self.layer_groups`). See the [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/abs/1801.06146) paper for details and experimental results in NLP (we also frequently use them successfully in computer vision, but have not published a paper on this topic yet). When working with a [`Learner`](/basic_train.html#Learner) on which you've called `split`, you can set hyperparameters in four ways:
1. `param = [val1, val2 ..., valn]` (n = number of layer groups)
2. `param = val`
3. `param = slice(start,end)`
4. `param = slice(end)`
If we chose to set it in way 1, we must specify a number of values exactly equal to the number of layer groups. If we chose to set it in way 2, the chosen value will be repeated for all layer groups. See [`Learner.lr_range`](/basic_train.html#Learner.lr_range) for an explanation of the `slice` syntax).
Here's an example of how to use discriminative learning rates (note that you don't actually need to manually call [`Learner.split`](/basic_train.html#Learner.split) in this case, since fastai uses this exact function as the default split for `resnet18`; this is just to show how to customize it):
```
# Create 3 layer groups by splitting the model at the given modules.
learn.split(lambda m: (m[0][6], m[1]))
# Freeze: only the randomly initialized head is now trainable.
learn.freeze()
learn.fit_one_cycle(1)
# Unfreeze: all layers are now trainable.
learn.unfreeze()
# Optionally pass a separate LR and WD per layer group (one value each).
learn.fit_one_cycle(1, max_lr=(1e-4, 1e-3, 1e-2), wd=(1e-4,1e-4,1e-1))
show_doc(Learner.lr_range)
```
Rather than manually setting an LR for every group, it's often easier to use [`Learner.lr_range`](/basic_train.html#Learner.lr_range). This is a convenience method that returns one learning rate for each layer group. If you pass `slice(start,end)` then the first group's learning rate is `start`, the last is `end`, and the remaining are evenly geometrically spaced.
If you pass just `slice(end)` then the last group's learning rate is `end`, and all the other groups are `end/10`. For instance (for our learner that has 3 layer groups):
```
learn.lr_range(slice(1e-5,1e-3)), learn.lr_range(slice(1e-3))
show_doc(Learner.unfreeze)
```
Sets every layer group to *trainable* (i.e. `requires_grad=True`).
```
show_doc(Learner.freeze)
```
Sets every layer group except the last to *untrainable* (i.e. `requires_grad=False`).
```
show_doc(Learner.freeze_to)
show_doc(Learner.split)
```
A convenience method that sets `layer_groups` based on the result of [`split_model`](/torch_core.html#split_model). If `split_on` is a function, it calls that function and passes the result to [`split_model`](/torch_core.html#split_model) (see above for example).
### Saving and loading models
Simply call [`Learner.save`](/basic_train.html#Learner.save) and [`Learner.load`](/basic_train.html#Learner.load) to save and load models. Only the parameters are saved, not the actual architecture (so you'll need to create your model in the same way before loading weights back in). Models are saved to the `path`/`model_dir` directory.
```
show_doc(Learner.load)
show_doc(Learner.save)
```
### Deploying your model
When you are ready to put your model in production, export the minimal state of your [`Learner`](/basic_train.html#Learner) with
```
show_doc(Learner.export)
```
Then you can load it with the following function.
```
show_doc(load_learner)
```
You can find more information and multiple examples in [this tutorial](/tutorial.inference.html)
### Other methods
```
show_doc(Learner.init)
```
Initializes all weights (except batchnorm) using function `init`, which will often be from PyTorch's [`nn.init`](https://pytorch.org/docs/stable/nn.html#torch-nn-init) module.
```
show_doc(Learner.mixup)
```
Uses [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback).
```
show_doc(Learner.backward)
show_doc(Learner.create_opt)
```
You generally won't need to call this yourself - it's used to create the [`optim`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) optimizer before fitting the model.
```
show_doc(Learner.dl)
show_doc(Recorder, title_level=2)
```
A [`Learner`](/basic_train.html#Learner) creates a [`Recorder`](/basic_train.html#Recorder) object automatically - you do not need to explicitly pass it to `callback_fns` - because other callbacks rely on it being available. It stores the smoothed loss, hyperparameter values, and metrics for each batch, and provides plotting methods for each. Note that [`Learner`](/basic_train.html#Learner) automatically sets an attribute with the snake-cased name of each callback, so you can access this through `Learner.recorder`, as shown below.
### Plotting methods
```
show_doc(Recorder.plot)
```
This is mainly used with the learning rate finder, since it shows a scatterplot of loss vs learning rate.
```
learn = create_cnn(data, models.resnet18, metrics=accuracy)
learn.lr_find()
learn.recorder.plot()
show_doc(Recorder.plot_losses)
```
Note that validation losses are only calculated once per epoch, whereas training losses are calculated after every batch.
```
learn.fit_one_cycle(2)
learn.recorder.plot_losses()
show_doc(Recorder.plot_lr)
learn.recorder.plot_lr(show_moms=True)
show_doc(Recorder.plot_metrics)
```
Note that metrics are only collected at the end of each epoch, so you'll need to train at least two epochs to have anything to show here.
```
learn.recorder.plot_metrics()
```
### Callback methods
You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality.
```
show_doc(Recorder.on_backward_begin)
show_doc(Recorder.on_batch_begin)
show_doc(Recorder.on_epoch_end)
show_doc(Recorder.on_train_begin)
```
### Inner functions
The following functions are used along the way by the [`Recorder`](/basic_train.html#Recorder) or can be called by other callbacks.
```
show_doc(Recorder.add_metrics)
show_doc(Recorder.add_metric_names)
show_doc(Recorder.format_stats)
```
## Module functions
Generally you'll want to use a [`Learner`](/basic_train.html#Learner) to train your model, since they provide a lot of functionality and make things easier. However, for ultimate flexibility, you can call the same underlying functions that [`Learner`](/basic_train.html#Learner) calls behind the scenes:
```
show_doc(fit)
```
Note that you have to create the `Optimizer` yourself if you call this function, whereas [`Learn.fit`](/basic_train.html#fit) creates it for you automatically.
```
show_doc(train_epoch)
```
You won't generally need to call this yourself - it's what [`fit`](/basic_train.html#fit) calls for each epoch.
```
show_doc(validate)
```
This is what [`fit`](/basic_train.html#fit) calls after each epoch. You can call it if you want to run inference on a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) manually.
```
show_doc(get_preds)
show_doc(loss_batch)
```
You won't generally need to call this yourself - it's what [`fit`](/basic_train.html#fit) and [`validate`](/basic_train.html#validate) call for each batch. It only does a backward pass if you set `opt`.
## Other classes
```
show_doc(LearnerCallback, title_level=3)
show_doc(RecordOnCPU, title_level=3)
```
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(Learner.tta_only)
show_doc(Learner.TTA)
show_doc(RecordOnCPU.on_batch_begin)
```
## New Methods - Please document or move to the undocumented section
| github_jupyter |
```
import pyspark
import os
from datetime import date
import functools
from IPython.core.display import display, HTML
#import findspark
#findspark.init()
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql.functions import to_timestamp, count, isnan, mean, col, countDistinct, format_number, dayofmonth, hour, dayofyear, month, year, weekofyear, date_format, when,datediff, months_between
import re
from functools import reduce
from funcoes import union_all

# Widen the notebook display area.
display(HTML("<style>.container { width:100% !important; }</style>"))

# Create the Spark session exactly once. The original code called
# SparkSession.builder...getOrCreate() a second time with a different app
# name ('Sindrome Gripal'); getOrCreate() returns the already-running
# session and silently ignores the new appName, so the duplicate call is
# removed here. The surviving app name is the one that actually took effect.
spark = SparkSession.builder.appName("SRAG").getOrCreate()

# Build a list of DataFrames, one per raw CSV export (files named dados_utf8*),
# all read with the same header/separator settings.
dfs = [spark.read.csv(os.path.join(os.path.expanduser('~/Área de Trabalho/analises/sindrome_gripal_bruto/'), x), header=True, sep=';') for x in os.listdir('./sindrome_gripal_bruto/') if x.startswith('dados_utf8')]
# Number of files loaded correctly.
print(len(dfs))
# Concatenate into a single DataFrame -- this method avoids column-value
# shifting when the number of columns differs between files.
df = union_all(dfs)
print(df.columns)
# A small sample of the data.
df.limit(5).toPandas()
print(f'total de linhas: {df.count()}')
print(f'numero de colunas: {len(df.columns)}')
df.printSchema()
# Per-column count of nulls / NaNs (transposed for readability).
df.select([F.count(F.when(F.isnan(c) | F.col(c).isNull(), c)).alias(c) for c in df.columns]).limit(1).toPandas().T
# Harmonize the verbose source column names to short snake_case aliases.
rename_columns = {
    'dataNascimento': 'dt_nasc', 'dataNotificacao': 'dt_ntf', 'dataInicioSintomas': 'dt_sint',
    'dataTeste': 'dt_teste', 'tipoTeste': 'tp_teste', 'resultadoTeste': 'res_teste',
    'classificacaoFinal': 'class_final', 'evolucaoCaso': 'ev_caso', 'estadoNotificacao': 'est_ntf',
}
# Fold every (old, new) pair through withColumnRenamed in one expression.
df = reduce(lambda acc, pair: acc.withColumnRenamed(pair[0], pair[1]),
            rename_columns.items(), df)
df.groupby(['res_teste', 'class_final']).count().show(1000, truncate=False)
# Inspect the categories, then recode sex to numeric codes:
# 1 = female, 2 = male, 9 = undefined; any other value becomes null.
df.groupby('sexo').count().show()
df = df.withColumn('sexo', F.when((df['sexo'] == 'Feminino'), F.lit(1)) \
        .when((df['sexo'] == 'Masculino') | (df['sexo'] == 'M'), F.lit(2)) \
        .when((df['sexo'] == 'Indefinido') | (df['sexo'] == 'undefined'), F.lit(9)))
df.groupby('sexo').count().show()
df.groupby('class_final').count().show(truncate=False)
# Collapse the many spelling/accent variants of the final case
# classification into numeric codes: 1=confirmed, 2=clinical-epidemiological,
# 3=clinical criterion, 4=laboratory, 5=clinical-imaging,
# 6=unspecified flu-like syndrome, 7=discarded; unmatched values become null.
df = df.withColumn('class_final', F.when((df['class_final'] == 'Confirmado por Critério Clínico'), F.lit(3)) \
        .when((df['class_final'] == 'Confirmado'), F.lit(1)) \
        .when((df['class_final'] == 'Confirmado Clinico-Epidemiologico'), F.lit(2)) \
        .when((df['class_final'] == 'Confirmação Clínico-Epidemiológico'), F.lit(2)) \
        .when((df['class_final'] == 'Confirmado Laboratorial'), F.lit(4)) \
        .when((df['class_final'] == 'Confirmado Clinico-Imagem'), F.lit(5)) \
        .when((df['class_final'] == 'Síndrome Gripal Não Especificada'), F.lit(6)) \
        .when((df['class_final'] == 'Confirmação Clínico Epidemiológico'), F.lit(2)) \
        .when((df['class_final'] == 'Sindrome Gripal Nao Especificada'), F.lit(6)) \
        .when((df['class_final'] == 'Confirmado Clínico-Imagem'), F.lit(5)) \
        .when((df['class_final'] == 'Descartado'), F.lit(7)) \
        .when((df['class_final'] == 'Confirmação Laboratorial'), F.lit(4)))
df.groupby('class_final').count().show(truncate=False)
# Test result: 1 = positive, 2 = negative, 9 = inconclusive/undefined.
df = df.withColumn('res_teste', F.when((df['res_teste'] == 'Positivo'), F.lit(1)) \
        .when((df['res_teste'] == 'undefined') | (df['res_teste'] == 'Inconclusivo ou Indefinido'), F.lit(9)) \
        .when((df['res_teste'] == 'Negativo'), F.lit(2)))
# Cross-check rows whose classification became null against their test result.
df.filter(df['class_final'].isNull()).groupby(['res_teste', 'class_final']).count().show()
df.groupby('tp_teste').count().show(truncate=False)
# Preview of the test-type recode: this first chain is NOT assigned back to df,
# it only shows the resulting category counts.
df.withColumn('tp_teste', F.when((df['tp_teste'] == 'RT-PCR'), F.lit(1)) \
        .when((df['tp_teste'] == 'Imunoensaio por Eletroquimioluminescência - ECLIA IgG') | (df['tp_teste'] == 'Imunoensaio por Eletroquimioluminescência ECLIA'), F.lit(2)) \
        .when((df['tp_teste'] == 'Quimioluminescência - CLIA'), F.lit(3)) \
        .when((df['tp_teste'] == 'Teste rápido'), F.lit(4)) \
        .when((df['tp_teste'] == 'Enzimaimunoensaio - ELISA IgM'), F.lit(5)) \
        .when((df['tp_teste'] == 'Enzimaimunoensaio ELISA'), F.lit(5)) \
        .when((df['tp_teste'] == 'TESTE RÁPIDO - ANTICORPO'), F.lit(6)) \
        .when((df['tp_teste'] == 'Concluído'), F.lit(7)) \
        .when((df['tp_teste'] == 'TESTE RÁPIDO - ANTÍGENO'), F.lit(4)) \
        .when((df['tp_teste'] == 'undefined'), F.lit(9))).groupby('tp_teste').count().show()
# Now apply the same recode for real:
# 1=RT-PCR, 2=ECLIA, 3=CLIA, 4=rapid/antigen test, 5=ELISA,
# 6=rapid antibody test, 7=concluded, 9=undefined.
df = df.withColumn('tp_teste', F.when((df['tp_teste'] == 'RT-PCR'), F.lit(1)) \
        .when((df['tp_teste'] == 'Imunoensaio por Eletroquimioluminescência - ECLIA IgG') | (df['tp_teste'] == 'Imunoensaio por Eletroquimioluminescência ECLIA'), F.lit(2)) \
        .when((df['tp_teste'] == 'Quimioluminescência - CLIA'), F.lit(3)) \
        .when((df['tp_teste'] == 'Teste rápido'), F.lit(4)) \
        .when((df['tp_teste'] == 'Enzimaimunoensaio - ELISA IgM'), F.lit(5)) \
        .when((df['tp_teste'] == 'Enzimaimunoensaio ELISA'), F.lit(5)) \
        .when((df['tp_teste'] == 'TESTE RÁPIDO - ANTICORPO'), F.lit(6)) \
        .when((df['tp_teste'] == 'Concluído'), F.lit(7)) \
        .when((df['tp_teste'] == 'TESTE RÁPIDO - ANTÍGENO'), F.lit(4)) \
        .when((df['tp_teste'] == 'undefined'), F.lit(9)))
df.groupby('ev_caso').count().show(truncate=False)
# Case outcome: 1=death, 2=cured, 3=hospitalised (ward or ICU),
# 4=cancelled, 5=home treatment, 9=ignored.
df = df.withColumn('ev_caso', F.when((df['ev_caso'] == 'Óbito'), F.lit(1)) \
        .when((df['ev_caso'] == 'Cura'), F.lit(2)) \
        .when((df['ev_caso'] == 'Internado em UTI'), F.lit(3)) \
        .when((df['ev_caso'] == 'Internado'), F.lit(3)) \
        .when((df['ev_caso'] == 'Ignorado'), F.lit(9)) \
        .when((df['ev_caso'] == 'Cancelado'), F.lit(4)) \
        .when((df['ev_caso'] == 'Em tratamento domiciliar'), F.lit(5)))
df.groupby('ev_caso').count().show()
df.count()
# Cast the raw string date columns to proper DateType.
list_var_date = ['dt_sint', 'dt_nasc', 'dt_ntf', 'dt_teste']
for date_col in list_var_date:
    # Loop variable renamed from `col` to `date_col`: the original shadowed
    # pyspark.sql.functions.col, which is imported at the top of the file.
    df = df.withColumn(date_col, F.col(date_col).cast(T.DateType()))
# Keep only plausible birth dates (1930 up to, but excluding, the current year).
df = df.filter((F.year(df['dt_nasc']) >= 1930) & (F.year(df['dt_nasc']) < date.today().year))
df.count()
print(df.columns)
df2 = df.select('dt_ntf', 'dt_nasc', 'dt_teste', 'sexo', 'class_final', 'res_teste', 'tp_teste', 'ev_caso', 'idade')
df2.show()
# Recompute implausible (>= 100) or missing ages from the birth date.
# BUG FIX: F.datediff() returns *days*; the original stored the raw day
# count as the age, which later breaks the var_fx age-bracket lookup
# (its keys are '0'..'100'). Divide by 365.25 to obtain whole years.
df2 = df2.withColumn('idade',
        F.when((F.col('idade') >= 100) | (F.col('idade').isNull()),
               F.floor(F.datediff(
                   F.current_date(),
                   F.to_date(F.col('dt_nasc'))) / 365.25)) \
        .otherwise(F.col('idade')))
df2.show()
df2.show()
# Age (in years) -> age-bracket code, e.g. 2024 means ages 20-24,
# 509 means 5-9, 8099 means 80-99.
var_fx = {
    '0': 4, '1': 4, '2': 4, '3': 4, '4': 4, '5': 509, '6': 509, '7': 509, '8': 509, '9': 509, '10': 1014,
    '11': 1014, '12': 1014, '13': 1014, '14': 1014, '15': 1519, '16': 1519, '17': 1519, '18': 1519, '19': 1519,
    '20': 2024, '21': 2024, '22': 2024, '23': 2024, '24': 2024, '25': 2529, '26': 2529, '27': 2529, '28': 2529,
    '29': 2529, '30': 3034, '31': 3034, '32': 3034, '33': 3034, '34': 3034, '35': 3539, '36': 3539, '37': 3539,
    '38': 3539, '39': 3539, '40': 4044, '41': 4044, '42': 4044, '43': 4044, '44': 4044, '45': 4549, '46': 4549,
    '47': 4549, '48': 4549, '49': 4549, '50': 5054, '51': 5054, '52': 5054, '53': 5054, '54': 5054, '55': 5559,
    '56': 5559, '57': 5559, '58': 5559, '59': 5559, '60': 6064, '61': 6064, '62': 6064, '63': 6064, '64': 6064,
    '65': 6569, '66': 6569, '67': 6569, '68': 6569, '69': 6569, '70': 7074, '71': 7074, '72': 7074, '73': 7074,
    '74': 7074, '75': 8099, '76': 8099, '77': 8099, '78': 8099, '79': 8099, '80': 8099, '81': 8099, '82': 8099,
    '83': 8099, '84': 8099, '85': 8099, '86': 8099, '87': 8099, '88': 8099, '89': 8099, '90': 8099, '91': 8099,
    '92': 8099, '93': 8099, '94': 8099, '95': 8099, '96': 8099, '97': 8099, '98': 8099, '99': 8099, '100': 8099,
}

def _age_bracket(x):
    """Map one age value (str, int or float) to its bracket code.

    Returns None for missing or out-of-range ages. The original
    `lambda x: var_fx[x]` raised KeyError inside the executor for any
    value that was not one of the string keys '0'..'100'.
    """
    try:
        return var_fx[str(int(float(x)))]
    except (TypeError, ValueError, KeyError):
        return None

df2 = df2.withColumn('fx_etaria', F.udf(_age_bracket, T.IntegerType())(df2['idade']))
df2.show(3, 0)
# Separate death indicator: 1 when ev_caso == 1 (death), null otherwise.
df2 = df2.withColumn('evolucaoCaso', F.when(df2['ev_caso'] == 1, F.lit(1)))
# Aggregate to one row per combination of dimensions, counting cases.
df3 = df2.groupby(['dt_ntf', 'dt_teste', 'sexo', 'class_final', 'res_teste', 'tp_teste', 'ev_caso', 'fx_etaria', 'evolucaoCaso']).count()
df3 = df3.withColumnRenamed('count', 'casos')
df3.show(5, 0)
# Write the aggregate as a single parquet file.
df3.coalesce(1).write.parquet('sg')
```
| github_jupyter |
# Table of Contents
<div class="toc" style="margin-top: 1em;"><ul class="toc-item" id="toc-level0"><li><span><a href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Load-libraries" data-toc-modified-id="Load-libraries-1"><span class="toc-item-num">1 </span>Load libraries</a></span></li><li><span><a href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Define-loss-functions" data-toc-modified-id="Define-loss-functions-2"><span class="toc-item-num">2 </span>Define loss functions</a></span></li><li><span><a href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Define-models" data-toc-modified-id="Define-models-3"><span class="toc-item-num">3 </span>Define models</a></span></li><li><span><a href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Training" data-toc-modified-id="Training-4"><span class="toc-item-num">4 </span>Training</a></span><ul class="toc-item"><li><span><a href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Functions,-generators-and-data" data-toc-modified-id="Functions,-generators-and-data-4.1"><span class="toc-item-num">4.1 </span>Functions, generators and data</a></span></li><li><span><a href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Training" data-toc-modified-id="Training-4.2"><span class="toc-item-num">4.2 </span>Training</a></span></li></ul></li><li><span><a href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Predictions" data-toc-modified-id="Predictions-5"><span class="toc-item-num">5 </span>Predictions</a></span></li><li><span><a href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Submission" data-toc-modified-id="Submission-6"><span class="toc-item-num">6 </span>Submission</a></span></li><li><span><a 
href="http://localhost:8889/notebooks/19-full-res-model-all-angles-vertical-cut-no-bbox.ipynb#Pseudo-labeling" data-toc-modified-id="Pseudo-labeling-7"><span class="toc-item-num">7 </span>Pseudo labeling</a></span></li></ul></div>
# Load libraries
```
import cv2
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Activation, UpSampling2D, BatchNormalization
from keras.optimizers import RMSprop
from keras.losses import binary_crossentropy
import keras.backend as K
from sklearn.model_selection import train_test_split
import math
import random
import gzip
import pickle
import matplotlib.pyplot as plt
%matplotlib inline
```
# Define loss functions
```
def dice_coeff(y_true, y_pred):
    """Soft Dice coefficient between the flattened masks, smoothed by 1."""
    smooth = 1.
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    return (2. * overlap + smooth) / (K.sum(truth) + K.sum(pred) + smooth)
def dice_loss(y_true, y_pred):
    """1 - Dice coefficient, usable as a Keras loss."""
    return 1 - dice_coeff(y_true, y_pred)
def bce_dice_loss(y_true, y_pred):
    """Binary cross-entropy plus Dice loss (the training objective)."""
    return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
```
# Define models
```
def unet_down_one_block(inputs, num_filters):
    """Two Conv(3x3)-BatchNorm-ReLU stacks at the given filter width."""
    x = inputs
    for _ in range(2):
        x = Conv2D(num_filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return x
def unet_max_pool(inputs):
    """Halve the spatial resolution with non-overlapping 2x2 max pooling."""
    return MaxPooling2D((2, 2), strides=(2, 2))(inputs)
def unet_up_one_block(up_input, down_input, num_filters):
    """Upsample 2x, concatenate the skip connection from the encoder path,
    then apply three Conv(3x3)-BatchNorm-ReLU stacks."""
    x = UpSampling2D((2, 2))(up_input)
    x = concatenate([down_input, x], axis=3)
    for _ in range(3):
        x = Conv2D(num_filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return x
def get_unet(input_shape = (256, 256, 3),
             num_classes = 1,
             initial_filters = 32,
             central_filters = 1024):
    """Build a U-Net whose depth is derived from the filter schedule:
    filters double each encoder level from initial_filters up to
    central_filters, then halve back up the decoder.

    Returns an uncompiled Keras Model ending in a sigmoid 1x1 conv head.
    """
    num_filters = initial_filters
    # out_list[0] is the input tensor; the last entry is always the most
    # recent tensor on the current path.
    out_list = [Input(shape=input_shape)]
    down_interim_list = []  # pre-pooling tensors, kept for skip connections
    # Encoder: conv block + max-pool, doubling filters each level.
    while num_filters <= central_filters/2:
        x = unet_down_one_block(out_list[-1], num_filters)
        down_interim_list.append(x)
        num_filters = num_filters * 2
        y = unet_max_pool(x)
        out_list.append(y)
    # Bottleneck block at central_filters width.
    x = unet_down_one_block(out_list[-1], num_filters)
    out_list.append(x)
    num_filters = int(num_filters / 2)
    # Decoder: upsample + skip concat, halving filters each level;
    # skip tensors are popped in reverse (deepest first).
    while num_filters >= initial_filters:
        x = unet_up_one_block(out_list[-1], down_interim_list.pop(), num_filters)
        out_list.append(x)
        num_filters = int(num_filters / 2)
    # 1x1 conv + sigmoid produces per-pixel mask probabilities.
    classify = Conv2D(num_classes, (1,1), activation = 'sigmoid')(out_list[-1])
    model = Model(inputs=out_list[0], outputs=classify)
    return model
# Full-resolution model: 1280x1024 inputs with only 8 initial filters
# (to fit memory), warm-started from previously trained full-res weights.
model = get_unet(input_shape=(1280,1024,3), initial_filters=8)
model.load_weights('./weights/best_weights_fullres3.hdf5')
```
# Training
## Functions, generators and data
```
# Image ids come from the mask csv ('xxx.jpg' -> 'xxx');
# 80/20 train/validation split with a fixed seed for reproducibility.
df_train = pd.read_csv('data/train_masks.csv')
ids_train = df_train['img'].map(lambda s: s.split('.')[0])
ids_train_split, ids_valid_split = train_test_split(ids_train, test_size=0.2, random_state=42)
ids_train_split = list(ids_train_split)
ids_valid_split = list(ids_valid_split)
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    """With probability u, shift the hue/saturation/value channels of a BGR
    uint8 image by uniform random amounts within the given limits.

    NOTE(review): the shifts are floats added via cv2.add, which saturates
    at the uint8 bounds instead of wrapping hue (OpenCV hue is 0-179) --
    confirm this clipping is the intended augmentation behaviour.
    """
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image
def randomShiftScaleRotate(image, mask,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    """With probability u, apply the same random shift/scale/rotate (and
    optional aspect distortion) warp to both image and mask so they stay
    aligned."""
    if np.random.random() < u:
        height, width, channel = image.shape
        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        # Split the overall scale into x/y factors so the aspect ratio is
        # distorted by sqrt(aspect) in each direction.
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
        # 2x2 rotation+scale matrix applied about the image centre.
        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])
        # Map the four image corners through the transform, then derive the
        # perspective matrix from the corner correspondence.
        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        # Out-of-frame areas are filled with black (borderValue 0,0,0).
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(
                                       0, 0,
                                       0,))
    return image, mask
def randomHorizontalFlip(image, mask, u=0.5):
    """With probability u, mirror both image and mask left-right."""
    if np.random.random() < u:
        image, mask = cv2.flip(image, 1), cv2.flip(mask, 1)
    return image, mask
def train_generator(train_batch_size):
    """Infinite generator over shuffled training ids.

    Each source image is split into two overlapping 1024-wide crops
    (columns 0-1023 and 894-onward), so every image yields two
    (image, mask) samples; pixels and masks are scaled to [0, 1].

    NOTE(review): unlike valid_generator below, the right-hand crop is NOT
    mirrored here -- confirm whether this asymmetry is intentional (a
    flipped variant of this generator is redefined later in the file).
    """
    while True:
        # Re-shuffle the id order at the start of every epoch.
        this_ids_train_split = random.sample(ids_train_split, len(ids_train_split))
        for start in range(0, len(ids_train_split), train_batch_size):
            x_batch = []
            y_batch = []
            end = min(start + train_batch_size, len(ids_train_split))
            ids_train_batch = this_ids_train_split[start:end]
            for id in ids_train_batch:
                img = cv2.imread('data/train/{}.jpg'.format(id))
                mask = cv2.imread('data/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                img1 = np.copy(img[:, 0:1024, :])
                mask1 = np.copy(mask[:, 0:1024])
                mask1 = np.expand_dims(mask1, axis=2)
                img2 = np.copy(img[:, 894:, :])
                mask2 = np.copy(mask[:, 894:])
                mask2 = np.expand_dims(mask2, axis=2)
                x_batch.append(img1)
                y_batch.append(mask1)
                x_batch.append(img2)
                y_batch.append(mask2)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
def valid_generator(val_batch_size):
    """Infinite generator over validation ids (fixed order, no shuffling).

    Same two-crop scheme as training, except the right-hand crop and its
    mask are mirrored (cv2.flip) before batching.
    """
    while True:
        for start in range(0, len(ids_valid_split), val_batch_size):
            x_batch = []
            y_batch = []
            end = min(start + val_batch_size, len(ids_valid_split))
            ids_valid_batch = ids_valid_split[start:end]
            for id in ids_valid_batch:
                img = cv2.imread('data/train/{}.jpg'.format(id))
                mask = cv2.imread('data/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                img1 = np.copy(img[:, 0:1024, :])
                mask1 = np.copy(mask[:, 0:1024])
                mask1 = np.expand_dims(mask1, axis=2)
                img2 = np.copy(img[:, 894:, :])
                img2 = cv2.flip(img2, 1)
                mask2 = np.copy(mask[:, 894:])
                mask2 = cv2.flip(mask2, 1)
                mask2 = np.expand_dims(mask2, axis=2)
                x_batch.append(img1)
                y_batch.append(mask1)
                x_batch.append(img2)
                y_batch.append(mask2)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
```
## Training
```
# Compile with combined BCE + Dice loss at a very low learning rate
# (fine-tuning previously trained weights).
model.compile(optimizer=RMSprop(lr=0.00001), loss=bce_dice_loss, metrics=[dice_coeff])
train_batch_size = 3
val_batch_size = 14
max_epochs = 50
# Early stopping, LR decay on plateau, and best-weights checkpointing,
# all monitoring validation loss.
callbacks = [EarlyStopping(monitor='val_loss',
                           patience=5,
                           verbose=1,
                           min_delta=1e-4),
             ReduceLROnPlateau(monitor='val_loss',
                               factor=0.1,
                               patience=3,
                               verbose=1,
                               epsilon=1e-4),
             ModelCheckpoint(monitor='val_loss',
                             filepath='weights/vert_cut.hdf5',
                             save_best_only=True,
                             save_weights_only=True)]
history = model.fit_generator(generator=train_generator(train_batch_size),
                              steps_per_epoch=np.ceil(float(len(ids_train_split)) / float(train_batch_size)),
                              epochs=max_epochs,
                              verbose=2,
                              callbacks=callbacks,
                              validation_data=valid_generator(val_batch_size),
                              validation_steps=np.ceil(float(len(ids_valid_split)) / float(val_batch_size)))
history.history
```
# Predictions
```
# Reload the best checkpoint and score it on the validation generator.
model.load_weights('weights/vert_cut.hdf5')
model.evaluate_generator(valid_generator(val_batch_size), np.ceil(float(len(ids_valid_split)) / float(val_batch_size)))
from tqdm import tqdm
# Test ids come from the sample submission; keep the original filenames too.
df_test = pd.read_csv('data/sample_submission.csv')
ids_test = df_test['img'].map(lambda s: s.split('.')[0])
names = []
for id in ids_test:
    names.append('{}.jpg'.format(id))
# https://www.kaggle.com/stainsby/fast-tested-rle
def run_length_encode(mask):
'''
img: numpy array, 1 - mask, 0 - background
Returns run length as string formated
'''
inds = mask.flatten()
runs = np.where(inds[1:] != inds[:-1])[0] + 2
runs[1::2] = runs[1::2] - runs[:-1:2]
rle = ' '.join([str(r) for r in runs])
return rle
import bcolz
def save_array(fname, arr):
    """Persist arr to disk as a bcolz carray rooted at fname (overwrites)."""
    c=bcolz.carray(arr, rootdir=fname, mode='w')
    c.flush()
def load_array(fname):
    """Load a bcolz carray saved by save_array, as an in-memory ndarray."""
    return bcolz.open(fname)[:]
def save_test_predictions(ids_test, fname):
    """Predict masks for the given test ids and save them to a bcolz array
    at fname as uint8 values (probability * 100).

    Each image is predicted as two 1024-wide crops (the right one mirrored
    to match training), then the halves are stitched back together with
    the 130-column overlap region averaged.
    """
    val_batch_size=12
    all_preds=[]
    for start in range(0, len(ids_test), val_batch_size):
        x_batch = []
        end = min(start + val_batch_size, len(ids_test))
        ids_test_batch = ids_test[start:end]
        for id in ids_test_batch.values:
            img = cv2.imread('data/test/{}.jpg'.format(id))
            img1 = np.copy(img[:, 0:1024, :])
            img2 = np.copy(img[:, 894:, :])
            img2 = cv2.flip(img2, 1)
            x_batch.append(img1)
            x_batch.append(img2)
        x_batch = np.array(x_batch, np.float32) / 255
        preds = model.predict_on_batch(x_batch)
        preds = np.squeeze(preds, axis=3)
        final_preds = []
        for i in range(len(ids_test_batch.values)):
            # Crop pair for image i: even index = left crop,
            # odd index = mirrored right crop (un-mirror before stitching).
            pred_1 = np.copy(preds[(i*2), :, :])
            pred_2 = np.copy(preds[(i*2) + 1, :, :])
            pred_2 = cv2.flip(pred_2, 1)
            # Stitch: left-only | averaged overlap | right-only.
            left_part = np.copy(pred_1[:, 0:894])
            middle_1 = np.copy(pred_1[:, 894:1024])
            middle_2 = np.copy(pred_2[:, 0:130])
            middle_part = np.add(middle_1, middle_2)/2
            right_part = np.copy(pred_2[:, 130:1024])
            all_parts = np.concatenate((left_part, middle_part, right_part), axis=1)
            # Store probabilities as uint8 in [0, 100] to save space.
            final_preds.append(all_parts*100)
        final_preds = np.array(final_preds, np.uint8)
        all_preds.append(final_preds)
    all_preds = np.concatenate(all_preds, axis=0)
    save_array(fname, all_preds)
# Predict in chunks of 240 images, persisting each chunk to its own bcolz dir.
for start in tqdm(range(0, len(ids_test), 240)):
    end = min(start + 240, len(ids_test))
    ids_test_batch = ids_test[start:end]
    save_test_predictions(ids_test_batch, './vert-cut-preds/batch-' + str(start))
```
# Submission
```
# Threshold the stored uint8 probabilities (range [0, 100]) at 50 and
# run-length encode each resulting binary mask.
rles = []
for start in tqdm(range(0, len(ids_test), 240)):
    all_preds = load_array('./vert-cut-preds/batch-' + str(start))
    all_preds = all_preds > 50
    for i in range(all_preds.shape[0]):
        mask = np.copy(all_preds[i,:,:])
        rle = run_length_encode(mask)
        rles.append(rle)
print("Generating submission file...")
# Filenames in `names` are in the same order as the stored predictions.
df = pd.DataFrame({'img': names, 'rle_mask': rles})
df.to_csv('submit/submission12.csv.gz', index=False, compression='gzip')
```
# Pseudo labeling
```
def train_generator(train_batch_size):
    """Redefinition of the earlier train_generator: identical two-crop
    scheme, except the right-hand crop and its mask are now mirrored
    (cv2.flip) so the geometry matches valid_generator/test_generator.
    """
    while True:
        this_ids_train_split = random.sample(ids_train_split, len(ids_train_split))
        for start in range(0, len(ids_train_split), train_batch_size):
            x_batch = []
            y_batch = []
            end = min(start + train_batch_size, len(ids_train_split))
            ids_train_batch = this_ids_train_split[start:end]
            for id in ids_train_batch:
                img = cv2.imread('data/train/{}.jpg'.format(id))
                mask = cv2.imread('data/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                img1 = np.copy(img[:, 0:1024, :])
                mask1 = np.copy(mask[:, 0:1024])
                mask1 = np.expand_dims(mask1, axis=2)
                img2 = np.copy(img[:, 894:, :])
                img2 = cv2.flip(img2, 1)
                mask2 = np.copy(mask[:, 894:])
                mask2 = cv2.flip(mask2, 1)
                mask2 = np.expand_dims(mask2, axis=2)
                x_batch.append(img1)
                y_batch.append(mask1)
                x_batch.append(img2)
                y_batch.append(mask2)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
def test_generator(test_batch_size):
    """Infinite generator over test images with pseudo-label masks.

    Masks come from the stored uint8 predictions ([0, 100]) thresholded
    at 50; images are split into the same two 1024-wide crops as the
    training data, with the right crop mirrored.

    NOTE(review): load_array re-opens a full 240-image prediction chunk
    for every single sample -- correct but slow; consider caching.
    """
    while True:
        this_test_slnos = random.sample(range(len(ids_test)), len(ids_test))
        for start in range(0, len(ids_test), test_batch_size):
            x_batch = []
            y_batch = []
            end = min(start + test_batch_size, len(ids_test))
            slnos_batch = this_test_slnos[start:end]
            for slno in slnos_batch:
                # Locate the stored chunk containing this serial number.
                batch_num = math.floor(slno/240)*240
                batch_preds = load_array('./vert-cut-preds/batch-' + str(batch_num))
                within_batch_slno = slno % 240
                mask = (batch_preds[within_batch_slno,:,:] > 50).astype(np.float32)
                img = cv2.imread('data/test/{}.jpg'.format(ids_test[slno]))
                img1 = np.copy(img[:, 0:1024, :])
                mask1 = np.copy(mask[:, 0:1024])
                mask1 = np.expand_dims(mask1, axis=2)
                img2 = np.copy(img[:, 894:, :])
                img2 = cv2.flip(img2, 1)
                mask2 = np.copy(mask[:, 894:])
                mask2 = cv2.flip(mask2, 1)
                mask2 = np.expand_dims(mask2, axis=2)
                x_batch.append(img1)
                y_batch.append(mask1)
                x_batch.append(img2)
                y_batch.append(mask2)
            x_batch = np.array(x_batch, np.float32) / 255
            # Masks are already 0/1 floats, so no /255 here
            # (unlike train_generator).
            y_batch = np.array(y_batch, np.float32)
            yield x_batch, y_batch
# Restore the best weights and re-check validation performance, now with a
# 10x higher learning rate for the pseudo-label fine-tuning phase.
model.load_weights('weights/vert_cut.hdf5')
val_batch_size = 16
model.compile(optimizer=RMSprop(lr=0.0001), loss=bce_dice_loss, metrics=[dice_coeff])
model.evaluate_generator(valid_generator(val_batch_size), np.ceil(float(len(ids_valid_split)) / float(val_batch_size)))
def pseudo_lab_gen():
    """Yield mixed batches: 4 labelled training crops plus 4
    pseudo-labelled test crops per step (each source generator is created
    with batch size 2, and every image contributes 2 crops).
    """
    train_gen = train_generator(2)
    test_gen = test_generator(2)
    while True:
        train_stuff = next(train_gen)
        # Skip short batches at the end of an epoch. BUG FIX: the original
        # used a single `if`-retry, which could still pass a short batch
        # through; loop until a full batch of 4 crops arrives.
        while train_stuff[0].shape[0] != 4:
            train_stuff = next(train_gen)
        test_stuff = next(test_gen)
        while test_stuff[0].shape[0] != 4:
            test_stuff = next(test_gen)
        yield np.concatenate((train_stuff[0], test_stuff[0]), axis=0), np.concatenate((train_stuff[1], test_stuff[1]), axis=0)
# Fine-tune on the mixed real + pseudo-labelled stream for 10 epochs,
# checkpointing the best weights to a separate file.
callbacks = [ModelCheckpoint(monitor='val_loss',
                             filepath='weights/vert_cut2.hdf5',
                             save_best_only=True,
                             save_weights_only=True)]
history = model.fit_generator(generator=pseudo_lab_gen(),
                              steps_per_epoch=np.ceil(float(len(ids_train_split)) / float(2)),
                              epochs=10,
                              verbose=2,
                              callbacks=callbacks,
                              validation_data=valid_generator(val_batch_size),
                              validation_steps=np.ceil(float(len(ids_valid_split)) / float(val_batch_size)))
```
| github_jupyter |
<small><small><i>
All the IPython Notebooks in this **Python Examples** series by Dr. Milaan Parmar are available @ **[GitHub](https://github.com/milaan9/90_Python_Examples)**
</i></small></small>
# Python Program to Make a Simple Calculator
In this example you will learn to create a simple calculator that can add, subtract, multiply or divide depending upon the input from the user.
To understand this example, you should have the knowledge of the following **[Python programming](https://github.com/milaan9/01_Python_Introduction/blob/main/000_Intro_to_Python.ipynb)** topics:
* **[Python Functions](https://github.com/milaan9/04_Python_Functions/blob/main/001_Python_Functions.ipynb)**
* **[Python Function Arguments](https://github.com/milaan9/04_Python_Functions/blob/main/004_Python_Function_Arguments.ipynb)**
* **[Python User-defined Functions](https://github.com/milaan9/04_Python_Functions/blob/main/Python_User_defined_Functions.ipynb)**
* **[Python if-elif-else Statement](https://github.com/milaan9/03_Python_Flow_Control/blob/main/003_Python_if_elif_else_statement%20.ipynb)**
```
# Example 1: Simple Calculator by Using Functions
# This function adds two numbers
def add(x, y):
    """Return the sum of the two operands."""
    total = x + y
    return total
# This function subtracts two numbers
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
# This function multiplies two numbers
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
# This function divides two numbers
def divide(x, y):
    """Return x / y using true division.

    Raises ZeroDivisionError when y == 0, like plain division.
    """
    quotient = x / y
    return quotient
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")

while True:
    # Take input from the user
    choice = input("Enter choice(1/2/3/4): ")

    # Check if choice is one of the four options
    if choice in ('1', '2', '3', '4'):
        num1 = float(input("Enter first number: "))
        num2 = float(input("Enter second number: "))

        if choice == '1':
            print(num1, "+", num2, "=", add(num1, num2))
        elif choice == '2':
            print(num1, "-", num2, "=", subtract(num1, num2))
        elif choice == '3':
            print(num1, "*", num2, "=", multiply(num1, num2))
        elif choice == '4':
            # NOTE(review): divide() raises ZeroDivisionError when the
            # second number is 0 -- there is no guard here.
            print(num1, "/", num2, "=", divide(num1, num2))
        # A valid operation was performed; exit the loop.
        break
    else:
        # Anything else re-prompts until a valid choice is entered.
        print("Invalid Input")
'''
>>Output/Runtime Test Cases:
Select operation.
1.Add
2.Subtract
3.Multiply
4.Divide
Enter choice(1/2/3/4): 3
Enter first number: 3
Enter second number: 9
3.0 * 9.0 = 27.0
'''
```
**Explanation:**
In this program, we ask the user to choose an operation. Options **`1`**, **`2`**, **`3`**, and **`4`** are valid. If any other input is given, **`Invalid Input`** is displayed and the loop continues until a valid option is selected.
Two numbers are taken and an **`if-elif-else`** branching is used to execute a particular section. User-defined functions **`add()`**, **`subtract()`**, **`multiply()`** and **`divide()`** evaluate respective operations and display the output.
| github_jupyter |
# COVID-19 Analysis
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

def _load_time_series(path):
    """Load one JHU CSSE time-series CSV and return it with dates as rows
    and countries as columns.

    The raw layout is one row per Province/State with Lat/Long columns:
    drop the province level, transpose, promote the country row to the
    header, and drop the country/latitude/longitude rows.
    """
    df = pd.read_csv(path)
    df.rename(columns={'Country/Region':'country'}, inplace=True)
    df = df.drop(columns=['Province/State'])
    df = df.T
    new_header = df.iloc[0]   # grab the first row for the header
    df = df[3:]               # drop country, latitude and longitude rows
    df.columns = new_header   # set the header row as the df header
    return df

# The same cleanup was originally copy-pasted three times; it is factored
# into the helper above so all three frames are processed identically.
confirmed = _load_time_series('data/time_series_covid19_confirmed_global.csv')
confirmed.head()
deaths = _load_time_series('data/time_series_covid19_deaths_global.csv')
deaths.head()
recovered = _load_time_series('data/time_series_covid19_recovered_global.csv')
recovered.head()
```
## Plot one Country
```
# Confirmed-case curve for a single country (Pakistan).
plt.figure(figsize=(14,5))
plt.xlabel('Date')
plt.ylabel('Confirmed Cases Pakistan')
plt.title('Coronavirus Confirmed Cases in Pakistan')
plt.plot(pd.to_datetime(confirmed.index),confirmed['Pakistan'])
# Same plot for Italy.
plt.figure(figsize=(14,5))
plt.xlabel('Date')
plt.ylabel('Confirmed Cases Italy')
plt.title('Coronavirus Confirmed Cases in Italy')
plt.plot(pd.to_datetime(confirmed.index),confirmed['Italy'])
```
## Plot 2 countries
```
# Two-country comparison: confirmed cases, US vs Italy.
plt.figure(figsize=(12,8))
plt.xlabel('Date')
plt.ylabel('Confirmed Cases')
plt.title('Coronavirus Confirmed Cases')
plt.plot(pd.to_datetime(confirmed.index),confirmed['US'])
plt.plot(pd.to_datetime(confirmed.index),confirmed['Italy'])
plt.legend(['US','Italy'])
# Same comparison for deaths.
plt.figure(figsize=(12,8))
plt.xlabel('Date')
plt.ylabel('Deaths')
plt.title('Coronavirus Deaths')
plt.plot(pd.to_datetime(deaths.index),deaths['US'])
plt.plot(pd.to_datetime(deaths.index),deaths['Italy'])
plt.legend(['US','Italy'])
# Plot confirmed and death together
# Deaths are scaled x10 so both curves share one axis; the dashed
# horizontal line at 12000 marks Italy's "turning point".
plt.figure(figsize=(12,8))
plt.xlabel('Date')
plt.ylabel('Cases')
plt.title('Coronavirus Cases')
plt.plot(pd.to_datetime(confirmed.index),confirmed['Italy'])
plt.plot(pd.to_datetime(deaths.index),deaths['Italy']*10)
plt.plot(pd.to_datetime(deaths.index),np.repeat(12000,len(confirmed)), '--')
plt.plot(pd.to_datetime(confirmed.index),confirmed['US'])
plt.plot(pd.to_datetime(deaths.index),deaths['US']*10)
plt.legend(['Italy Confirmed','Italy Deathsx10',"Italy's Turning Point",'US Confirmed','US Deathsx10'])
```
Although the US is likely to become the next epicenter of the disease in no time, Italy still has to worry about its ever-increasing death rate.
After a certain point, the rate of deaths in Italy increased faster than the rate of infections. And before this point both were on separate scales. There might be a number of reasons for the speeding up deaths. On the other hand, the US has not seen that turning point, again it's uncertain to say why. But this definitely demonstrates that in some countries the deaths can speed up faster than new infections.
```
# And for Pakistan
# Same scaled comparison (deaths x10 share the confirmed-case axis).
plt.figure(figsize=(12,8))
plt.xlabel('Date')
plt.ylabel('Cases')
plt.title('Coronavirus Cases')
plt.plot(pd.to_datetime(confirmed.index),confirmed['Pakistan'])
plt.plot(pd.to_datetime(deaths.index),deaths['Pakistan']*10)
plt.legend(['Pakistan Confirmed','Pakistan Deathsx10'])
```
Fortunately, Pakistan has not seen that pattern so far, but this does not mean it will never become as badly affected as Italy — only time will tell. Another consideration is that the quality of data collection, and of the healthcare system overall, is quite different in Pakistan compared to Italy or the US.
| github_jupyter |
# Building Model for MALARIA DETECTION
### Lets take a look at how we are gonna make our model
#### Step 1: Loading and Splitting of the Dataset
- The first step is to load the data and scaling the images to binary 0 and 1 from Parasitized and Uninfected.
- Then we will resize the images to 50 x 50
- After that, shuffling of the images before the train-test split, and converting the images to a single numpy array
- Splitting the data
- Converting the type of X_train and X_valid to float32
- Add then One Hot Encoding on y
#### Step 2: Building the CNN model
- The CNN model is one of the most efficient neural networks for images and for performing classification. We will use tf.keras to build the CNN model.
- We will build a Sequential CNN model.
- We will build a CNN Layer followed by MaxPooling layer which is later followed by BatchNormalisation to normalize the previous layer's output and implement the Dropout regularization. After that we will use Flatten to the outputs. Then the last layer that has function Softmax is the output layer.
- Finally we have to compile the CNN model. We will use optimizer called Adam then will apply the loss function as categorical_crossentropy and an evaluation metric as accuracy.
- The next step is to use the fit function to train our convolutional neural network (CNN) with X_train and y_train. Let's set the total number of epochs to 25 — essentially 25 full cycles (iterations) over the dataset — with a batch size of 120.
#### Step 3 : Predictions and Testing of the Model
- After this we will make predictions and evaluate the built model.
- The last step will be to test our model on the HOLDOUT DATASET and making predictions.
### Importing Libraries
```
# importing the libraries for loading data and visualisation
import os
import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from PIL import Image
import seaborn as sns
# import for train-test-split
from sklearn.model_selection import train_test_split
# import for One Hot Encoding
from keras.utils import to_categorical
# importing libraries for Model
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Flatten, Dropout, BatchNormalization
# importing libraries for evaluating the model
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
```
## Loading Data and Train-Test-Split
```
# loading the data of images and setting their labels
# label convention: 0 = Parasitized (infected), 1 = Uninfected
data = []
labels = []

def _load_cell_images(folder, label):
    """Read every image in `folder`, resize it to 50x50 and append it to
    `data`, recording `label` in `labels`.

    Files that cv2 cannot read (cv2.imread returns None) are skipped;
    the original code reached the same outcome by letting Image.fromarray
    raise AttributeError and printing a blank line.
    """
    for fname in os.listdir(folder):
        image = cv2.imread(folder + fname)
        if image is None:
            continue  # unreadable or non-image file
        size_image = Image.fromarray(image, 'RGB').resize((50, 50))
        data.append(np.array(size_image))
        labels.append(label)

_load_cell_images("../input/cell-images-for-detecting-malaria/cell_images/Parasitized/", 0)
_load_cell_images("../input/cell-images-for-detecting-malaria/cell_images/Uninfected/", 1)

# Creating single numpy array of all the images and labels
data = np.array(data)
labels = np.array(labels)
print('Cells : {} and labels : {}'.format(data.shape , labels.shape))

# lets shuffle the data and labels before splitting them into training and testing sets
shuffle_idx = np.arange(data.shape[0])
np.random.shuffle(shuffle_idx)
data = data[shuffle_idx]
labels = labels[shuffle_idx]

### Splitting the dataset into the Training set and Test set
X_train, X_valid, y_train, y_valid = train_test_split(data, labels, test_size = 0.2, random_state = 0)
print('Train data shape {} ,Test data shape {} '.format(X_train.shape, X_valid.shape))

# the network expects float inputs
X_train = X_train.astype('float32')
X_valid = X_valid.astype('float32')

# One Hot Encoding: integer labels -> 2-column indicator vectors
y_train = to_categorical(y_train)
y_valid = to_categorical(y_valid)
```
## Building Model
```
# Defining Model: a small sequential CNN for 50x50 RGB cell images,
# ending in a 2-way softmax (Parasitized vs Uninfected)
classifier = Sequential()

# CNN layers -- only the first layer needs input_shape; Keras infers the
# shapes of all following layers from the previous layer's output
classifier.add(Conv2D(32, kernel_size=(3, 3), input_shape = (50, 50, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(BatchNormalization(axis = -1))
classifier.add(Dropout(0.5)) # Dropout prevents overfitting

# second conv block (redundant input_shape argument removed -- it is
# ignored on any layer after the first)
classifier.add(Conv2D(32, kernel_size=(3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(BatchNormalization(axis = -1))
classifier.add(Dropout(0.5))

# classification head
classifier.add(Flatten())
classifier.add(Dense(units=128, activation='relu'))
classifier.add(BatchNormalization(axis = -1))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=2, activation='softmax'))

classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

# train for 25 epochs with batches of 120, validating on the held-out split
history = classifier.fit(X_train, y_train, batch_size=120, epochs=25, verbose=1, validation_data=(X_valid, y_valid))

print("Test_Accuracy: {:.2f}%".format(classifier.evaluate(X_valid, y_valid)[1]*100))
```
## "The Neural Network gave Accuracy of 95.23% "
- Summary of the Model
```
# print the architecture: layer types, output shapes and parameter counts
classifier.summary()
```
## Prediction, Evaluation and Testing of the Model
- Lets do our first prediction using predict and predict X_valid and store in y_pred variable
```
# class probabilities for the validation set, shape (n_samples, 2)
y_pred = classifier.predict(X_valid)
# Convert back to categorical values
y_pred = np.argmax(y_pred, axis=1)
# NOTE: this overwrites the one-hot y_valid with integer labels;
# the confusion-matrix cell below relies on that
y_valid = np.argmax(y_valid, axis=1)
print('Accuracy Score: ', accuracy_score(y_valid, y_pred))
```
- Evaluation of the CNN model by Plotting Confusion Matrix
```
# Confusion matrix of true validation labels vs predicted labels,
# rendered as an annotated heatmap
cm = confusion_matrix(y_valid, y_pred)
sns.heatmap(cm, annot=True)
```
- Testing model on Holdout Dataset
```
# loading holdout dataset (unlabeled images used below for spot checks)
data_new = []
holdout_dir = "../input/holdout-dataset-for-malaria/holdout_dataset/"
for fname in os.listdir(holdout_dir):
    image = cv2.imread(holdout_dir + fname)
    if image is None:
        continue  # skip unreadable files instead of silently printing blank lines
    # resize to the 50x50 input size the model was trained on
    size_image = Image.fromarray(image, 'RGB').resize((50, 50))
    data_new.append(np.array(size_image))

# Creating numpy array
data_new = np.array(data_new)
print(data_new.shape)
```
- Lets see now how our model predicts the Holdout dataset images
- Taking example of two images lets first take a look at the images and then predict them using our model and see how our model predicts the images
```
# NOTE: in a single notebook cell only the last imshow is rendered;
# run these in separate cells (or call plt.show() in between) to see both
plt.imshow(data_new[10])
plt.imshow(data_new[199])
```
- Clearly the first one is Uninfected
- And second image is Parasitized
- Lets see what our model predicts now
```
# match the dtype used for the training data
data_new = data_new.astype('float32')
# prediction
pred = classifier.predict(data_new)
# probabilities -> class labels (0 = Parasitized, 1 = Uninfected)
pred = np.argmax(pred, axis=1)
# NOTE: in one cell only the last expression is displayed
pred[10]
pred[199]
```
- The first prediction's output is 1, which means image is Uninfected
- Second prediction's output is 0, which means image is Parasitized
- So our model predictions are correct..
# Data Augmentation
### Augmentation of Data
```
from keras.preprocessing.image import ImageDataGenerator

# Augment training images: rescale to [0, 1], random zoom, horizontal flips
# and rotations. Validation images are only rescaled.
train_generator = ImageDataGenerator(rescale=1 / 255,
                                     zoom_range=0.3,
                                     horizontal_flip=True,
                                     rotation_range=30)
test_generator = ImageDataGenerator(rescale=1 / 255)

# wrap the in-memory arrays as batch generators (batch size 120, no shuffling)
train_generator = train_generator.flow(np.array(X_train), y_train,
                                       batch_size=120, shuffle=False)
test_generator = test_generator.flow(np.array(X_valid), y_valid,
                                     batch_size=120, shuffle=False)
```
For the training data, we rescaled the pictures by dividing it by 255, zoomed pictures with a variety of 0.3, After that, flipped the pictures horizontally plus rotated them by 30. And for the remaining data, which is validating data, only rescale the pictures. The train_generator and test_generator methods are built with a batch size of 120.
### Calculating new accuracy
```
# fit/evaluate accept generators directly in modern tf.keras;
# fit_generator and evaluate_generator are deprecated aliases of them
history = classifier.fit(train_generator,
                         steps_per_epoch=len(X_train) / 120,
                         epochs=25,
                         shuffle=False)

# NOTE(review): steps=len(X_valid) iterates the generator once per sample,
# not once per batch -- kept from the original, but confirm it is intended
print("Test_Accuracy(after augmentation): {:.2f}%".format(classifier.evaluate(test_generator, steps = len(X_valid), verbose = 1)[1]*100))
```
After that, we trained the classifier (the model) by utilizing the fit_generator method and measured that new accuracy.
### New Accuracy is approximately 95%
| github_jupyter |
```
##########################################################
# Relative Imports
##########################################################
import sys
from os.path import isfile
from os.path import join
def find_pkg(name: str, depth: int):
    """Search upward through parent directories for a package called *name*.

    Starting `depth` levels up and working back toward the current
    directory, look for ``<"../"*k>/name/__init__.py``. Return the list of
    ".." path parts for the first (deepest) level where the package is
    found, or None if it is not found (or depth <= 0).
    """
    for level in range(depth, 0, -1):
        up_parts = [".."] * level
        if isfile(join(*(up_parts + [name, "__init__.py"]))):
            return up_parts
    return None
def find_and_ins_syspath(name: str, depth: int):
    """Locate package *name* with find_pkg and prepend its parent directory
    to sys.path so it becomes importable. Raises RuntimeError when the
    package cannot be found within *depth* levels."""
    parts = find_pkg(name, depth)
    if parts is None:
        raise RuntimeError("Could not find {}. Try increasing depth.".format(name))
    pkg_dir = join(*parts)
    if pkg_dir not in sys.path:
        sys.path.insert(0, pkg_dir)
# Make `caldera` importable when running this notebook from inside the repo:
# if it is not installed, search up to 3 directories upward for the package
# and add that directory to sys.path.
try:
    import caldera
except ImportError:
    find_and_ins_syspath("caldera", 3)
```
# Caldera
```
import caldera
```
## Data tour
### GraphData
The primary data object used by caldera. We can generate random objects by using `GraphData.random` for testing and demo purposes...
```
from caldera.data import GraphData

# random GraphData with 5 node features, 4 edge features and 3 global features
data = GraphData.random(n_feat=5, e_feat=4, g_feat=3)
# notebook display of the object
data
```
`GraphData` instances contain node tensor (`data.x`), edge tensor (`data.e`) and global tensor (`data.g`). They also keep graph topology
via an edge list, `data.edges`, which indicates edges by node indices. Note that this means graphs are MultiGraphs (multiple edges between the same nodes are allowed)
#### node features
Each node gets a tensor. These tensors are stacked into `data.x`
```
# data.x stacks one feature tensor per node: shape (num_nodes, num_features)
print('We have {} nodes'.format(data.x.shape[0]))
print('Each node has a feature tensor of shape {}'.format(data.x.shape[1]))
print("Overall shape: {}".format(data.x.shape))
data.x
```
#### edge features
Each edge gets a tensor. These tensors are stacked into `data.e`
```
# data.e stacks one feature tensor per edge: shape (num_edges, num_features)
print('We have {} edge(s)'.format(data.e.shape[0]))
print('Each edge has a feature tensor of shape {}'.format(data.e.shape[1]))
print("Overall shape: {}".format(data.e.shape))
# fixed: this cell demonstrates edge features, but previously displayed
# data.x (the node features) instead of data.e
data.e
```
#### global features
Each graph gets a single global tensor. These tensors are stacked into `data.g`.
We may use this, for example, to label each graph with a category.
```
# data.g holds a single global feature tensor for the whole graph
print('We have a single global tensor of shape {}'.format(data.g.shape))
data.g
```
#### edges
Graph topology is stored in the `data.edges` attribute. This indicates which nodes (by index) are connected.
```
# the edge list: node-index pairs defining the graph's edges
data.edges
```
### Conversion
We can convert `GraphData` instances to and from networkx instances (graphs).
```
import networkx as nx
%matplotlib inline
# round-trip between GraphData and networkx
data = GraphData.random(5, 4, 3)

# convert to a networkx object
graph = data.to_networkx()

# draw
nx.draw(graph)

# convert back to GraphData object
from_nx = GraphData.from_networkx(graph)
print(from_nx)
```
From networkx...
# convert back to GraphData object
GraphData.from_networkx(graph)
| github_jupyter |
```
from dataretrieval import nwis
```
The dataRetrieval package was created as a python equivalent to the R dataRetrieval tool.
The following shows python equivalents for methods outlined in the R dataRetrieval Vignette with the equivalent R code in comments
```
# R dataRetrieval code for reference (kept as a string literal, not executed):
'''
library(dataRetrieval)
# Choptank River near Greensboro, MD
siteNumber <- "01491000"
ChoptankInfo <- readNWISsite(siteNumber)
parameterCd <- "00060"
#Raw daily data:
rawDailyData <- readNWISdv(siteNumber,parameterCd,
"1980-01-01","2010-01-01")
# Sample data Nitrate:
parameterCd <- "00618"
qwData <- readNWISqw(siteNumber,parameterCd,
"1980-01-01","2010-01-01")
pCode <- readNWISpCode(parameterCd)
'''
# python equivalents; each call returns (DataFrame, Metadata)
# Choptank River near Greensboro, MD
siteNumber = '01491000'
chop_tank_info, md = nwis.get_info(sites=siteNumber)  # site metadata (readNWISsite)
parameterCd = '00060'
# raw daily data
rawDailyData, md = nwis.get_dv(sites=siteNumber, parameterCd=parameterCd, start="1980-01-01", end="2010-01-01")
# sample data Nitrate:
parameterCd = "00618"
qwData, md = nwis.get_qwdata(sites=siteNumber, parameterCd=parameterCd, start="1980-01-01", end="2010-01-01")
pCode, md = nwis.get_pmcodes(parameterCd=parameterCd)  # parameter-code metadata
'''
{r getSite, echo=TRUE, eval=FALSE}
siteNumbers <- c("01491000","01645000")
siteINFO <- readNWISsite(siteNumbers)
'''
siteNumbers = ["01491000","01645000"]
# fixed: readNWISsite returns site metadata, so the python equivalent is
# nwis.get_info (as used earlier in this notebook); nwis.get_iv would
# download instantaneous values instead
siteINFO, md = nwis.get_info(sites=siteNumbers)
'''
# Continuing from the previous example:
# This pulls out just the daily, mean data:
dailyDataAvailable <- whatNWISdata(siteNumbers,
service="dv", statCd="00003")
'''
# NOTE(review): whatNWISdata summarizes data *availability*, while get_dv
# downloads the daily values themselves -- confirm this is the intended mapping
dailyDataAvailable, md = nwis.get_dv(sites=siteNumbers, statCd="00003")
'''
# Using defaults:
parameterCd <- "00618"
parameterINFO <- readNWISpCode(parameterCd)
'''
# metadata for one parameter code (00618 = nitrate)
pCode, md = nwis.get_pmcodes(parameterCd="00618")
'''
# Choptank River near Greensboro, MD:
siteNumber <- "01491000"
parameterCd <- "00060" # Discharge
startDate <- "2009-10-01"
endDate <- "2012-09-30"
discharge <- readNWISdv(siteNumber,
parameterCd, startDate, endDate)
'''
# daily discharge values for one site and date range
# Choptank River near Greensboro, MD:
siteNumber = "01491000"
parameterCd = "00060" # Discharge
startDate = "2009-10-01"
endDate = "2012-09-30"
discharge, md = nwis.get_dv(sites=siteNumber, parameterCd=parameterCd, start=startDate, end=endDate)
'''
siteNumber <- "01491000"
parameterCd <- c("00010","00060") # Temperature and discharge
statCd <- c("00001","00003") # Mean and maximum
startDate <- "2012-01-01"
endDate <- "2012-05-01"
temperatureAndFlow <- readNWISdv(siteNumber, parameterCd,
startDate, endDate, statCd=statCd)
'''
# multiple parameter codes and statistic codes are passed as lists
siteNumber = "01491000"
parameterCd = ["00010","00060"] # Temperature and discharge
statCd = ["00001","00003"] # Mean and maximum
startDate = "2012-01-01"
endDate = "2012-05-01"
temperatureAndFlow, md = nwis.get_dv(sites=siteNumber, parameterCd=parameterCd,
                                     start=startDate, end=endDate, statCd=statCd)
'''
parameterCd <- "00060" # Discharge
startDate <- "2012-05-12"
endDate <- "2012-05-13"
dischargeUnit <- readNWISuv(siteNumber, parameterCd,
startDate, endDate)
'''
# instantaneous ("unit value") data via get_iv
siteNumber = "01491000"
parameterCd = "00060" # Discharge
startDate = "2012-05-12"
endDate = "2012-05-13"
dischargeUnit, md = nwis.get_iv(sites=siteNumber, parameterCd=parameterCd,
                                start=startDate, end=endDate)
'''
# Dissolved Nitrate parameter codes:
parameterCd <- c("00618","71851")
startDate <- "1985-10-01"
endDate <- "2012-09-30"
dfLong <- readNWISqw(siteNumber, parameterCd,
startDate, endDate)
# Or the wide return:
dfWide <- readNWISqw(siteNumber, parameterCd,
startDate, endDate, reshape=TRUE)
'''
# water-quality samples (long format; no reshape/wide option here)
siteNumber = "01491000"
parameterCd = ["00618","71851"]
startDate = "1985-10-01"
endDate = "2012-09-30"
dfLong, md = nwis.get_qwdata(sites=siteNumber, parameterCd=parameterCd,
                             start=startDate, end=endDate)
'''
siteNumber <- "434400121275801"
groundWater <- readNWISgwl(siteNumber)
'''
# groundwater levels
siteNumber = "434400121275801"
groundWater, md = nwis.get_gwlevels(sites=siteNumber)
'''
siteNumber <- '01594440'
peakData <- readNWISpeak(siteNumber)
'''
# peak flow data
siteNumber = '01594440'
peakData, md = nwis.get_discharge_peaks(sites=siteNumber)
'''
ratingData <- readNWISrating(siteNumber, "base")
attr(ratingData, "RATING")
'''
# rating curve (stage/discharge relationship), "base" table
ratings_data, md = nwis.get_ratings(site='01594440', file_type="base")
'''surfaceData <- readNWISmeas(siteNumber)'''
# surface-water field measurements
siteNumber = '01594440'
surface_data, md = nwis.get_discharge_measurements(sites=siteNumber)
'''
allegheny <- readNWISuse(stateCd = "Pennsylvania",
countyCd = "Allegheny")
national <- readNWISuse(stateCd = NULL,
countyCd = NULL,
transform = TRUE)
'''
# water-use data; the python API takes FIPS codes ("003" = Allegheny County)
from dataretrieval import nwis
allegheny, md = nwis.get_water_use(state="PA", counties="003")
national, md = nwis.get_water_use()
'''
discharge_stats <- readNWISstat(siteNumbers=c("02319394"),
parameterCd=c("00060"),
statReportType="annual")
'''
# annual discharge statistics
discharge_stats, md = nwis.get_stats(sites='02319394', parameterCd="00060", statReportType='annual', statTypeCd='all')
'''
specificCond <- readWQPqw('WIDNR_WQX-10032762',
'Specific conductance',
'2011-05-01','2011-09-30')
'''
# Water Quality Portal queries live in the wqp module
from dataretrieval import wqp
specific_cond, md = wqp.get_results(siteid='WIDNR_WQX-10032762',
                                    characteristicName = 'Specific conductance',
                                    startDateLo='2011-05-01', startDateHi='2011-09-30')
'''
dischargeWI <- readNWISdata(service="dv",
stateCd="WI",
parameterCd="00060",
drainAreaMin="50",
statCd="00003")
'''
# state-wide daily discharge query
dischargeWI, md = nwis.get_dv(stateCd="WI", parameterCd="00060", drainAreaMin="50", statCd="00003")
'''
sitesNJ <- whatWQPsites(statecode="US:34",
characteristicName="Chloride")
'''
from dataretrieval import wqp
# site discovery: which WQP sites in New Jersey have chloride data
sitesNJ, md = wqp.what_sites(statecode="US:34", characteristicName="Chloride")
'''
dataPH <- readWQPdata(statecode="US:55",
characteristicName="pH")
'''
# fixed: readWQPdata downloads result records, so the python equivalent is
# wqp.get_results; wqp.what_sites only returns site metadata
dataPH, md = wqp.get_results(statecode="US:55", characteristicName="pH")
'''
type <- "Stream"
sites <- whatWQPdata(countycode="US:55:025",siteType=type)
'''
streamType = "Stream"
# NOTE(review): R's whatWQPdata lists available data per site, while
# get_results downloads result records -- confirm the intended mapping
sites, md = wqp.get_results(countycode="US:55:025", siteType=streamType)
'''site <- whatWQPsamples(siteid="USGS-01594440")'''
site, md = wqp.what_sites(siteid="USGS-01594440")
'''
type <- "Stream"
sites <- whatWQPmetrics(countycode="US:55:025",siteType=type)
'''
streamType = "Stream"
# NOTE(review): whatWQPmetrics has no direct equivalent; what_sites is used
sites, md = wqp.what_sites(countycode="US:55:025",siteType=streamType)
```
# Embedded Metadata
All service methods return the DataFrame containing requested data and Metadata as a tuple. Note, a call using get_record will only return the DataFrame to remain compatible with previous usage.
```
# national water-use totals (no state/county filter); returns (DataFrame, Metadata)
national, md = nwis.get_water_use()
```
md is an object with the following attributes
```
Metadata
url # the resulting url to query usgs
query_time # the time it took to query usgs
site_info # a method to call site_info with the site parameters supplied
header # any headers attached to the response object
```
| github_jupyter |
```
# default_exp inference
```
# Inference
> This contains the code required for inference.
```
# export
from fastai.learner import load_learner
from fastai.callback.core import GatherPredsCallback
from fastai.learner import Learner
from fastcore.basics import patch
from fastcore.meta import delegates
#export
@patch
def get_X_preds(self: Learner, X, y=None, bs=64, with_input=False, with_decoded=True, with_loss=False):
    "Run inference on raw data X, optionally returning inputs, decoded labels and losses."
    # losses need targets; silently disable the flag when y is missing
    if with_loss and y is None:
        print("cannot find loss as y=None")
        with_loss = False
    # build a dataloader around X (and y, when given) from the valid dl's pipeline
    dl = self.dls.valid.new_dl(X, y=y)
    if bs:
        setattr(dl, "bs", bs)
    else:
        assert dl.bs, "you need to pass a bs != 0"
    results = list(self.get_preds(dl=dl, with_input=with_input, with_decoded=with_decoded,
                                  with_loss=with_loss, reorder=False))
    # when the last tfms pipeline can decode, map predicted labels back
    # through it; the decoded entry sits after preds and targets (shifted
    # by one if inputs were requested)
    decoded_idx = 2 + with_input
    tls = self.dls.tls
    if with_decoded and len(tls) >= 2 and hasattr(tls[-1], "tfms") and hasattr(tls[-1].tfms, "decodes"):
        results[decoded_idx] = tls[-1].tfms.decode(results[decoded_idx])
    return tuple(results)
```
Get the predictions and targets, optionally with_input and with_loss.
with_decoded will also return the decoded predictions (it reverses the transforms applied).
The order of the output is the following:
- input (optional): if with_input is True
- **probabilities** (for classification) or **predictions** (for regression)
- **target**: if y is provided. Otherwise None.
- **predictions**: predicted labels. Predictions will be decoded if with_decoded=True.
- loss (optional): if with_loss is set to True and y is not None.
```
from tsai.data.external import get_UCR_data

# load the OliveOil UCR dataset unsplit, then carve out the test portion
dsid = 'OliveOil'
X, y, splits = get_UCR_data(dsid, split_data=False)
X_test = X[splits[1]]  # splits[1] holds the test indices
y_test = y[splits[1]]
learn = load_learner("./models/test.pth")
```
⚠️ Warning: load_learner (from fastai) requires all your custom code be in the exact same place as when exporting your Learner (the main script, or the module you imported it from).
```
# predictions without targets (targets come back as None)
test_probas, test_targets, test_preds = learn.get_X_preds(X_test, with_decoded=True)
test_probas, test_targets, test_preds

# predictions with targets supplied
test_probas2, test_targets2, test_preds2 = learn.get_X_preds(X_test, y_test, with_decoded=True) # This test fails on torch==1.10.0
test_probas2, test_targets2, test_preds2

# predictions with targets and per-sample losses
test_probas3, test_targets3, test_preds3, test_losses3 = learn.get_X_preds(X_test, y_test, with_loss=True, with_decoded=True)
test_probas3, test_targets3, test_preds3, test_losses3

# supplying y (or requesting losses) must not change probabilities/predictions
from fastcore.test import test_eq
test_eq(test_probas, test_probas2)
test_eq(test_preds, test_preds2)
test_eq(test_probas, test_probas3)
test_eq(test_preds, test_preds3)

#hide
# regenerate the library scripts from this notebook's exported cells
from tsai.imports import create_scripts
from tsai.export import get_nb_name
nb_name = get_nb_name()
create_scripts(nb_name);
```
| github_jupyter |
# Writing Down Qubit States
```
from qiskit import *
```
In the previous chapter we saw that there are multiple ways to extract an output from a qubit. The two methods we've used so far are the z and x measurements.
```
# z measurement of qubit 0: measure directly in the computational (z) basis
measure_z = QuantumCircuit(1,1)
measure_z.measure(0,0);

# x measurement of qubit 0: a Hadamard maps the x basis onto the z basis,
# so H followed by a z measurement implements an x measurement
measure_x = QuantumCircuit(1,1)
measure_x.h(0)
measure_x.measure(0,0);
```
Sometimes these measurements give results with certainty. Sometimes their outputs are random. This all depends on which of the infinitely many possible states our qubit is in. We therefore need a way to write down these states and figure out what outputs they'll give. For this we need some notation, and we need some math.
### The z basis
If you do nothing in a circuit but a measurement, you are certain to get the outcome `0`. This is because the qubits always start in a particular state, whose defining property is that it is certain to output a `0` for a z measurement.
We need a name for this state. Let's be unimaginative and call it $0$ . Similarly, there exists a qubit state that is certain to output a `1`. We'll call this $1$.
These two states are completely mutually exclusive. Either the qubit definitely outputs a ```0```, or it definitely outputs a ```1```. There is no overlap.
One way to represent this with mathematics is to use two orthogonal vectors.
$$
|0\rangle = \begin{pmatrix} 1 \\\\\\ 0 \end{pmatrix} \, \, \, \, |1\rangle =\begin{pmatrix} 0 \\\\\\ 1 \end{pmatrix}.
$$
This is a lot of notation to take in all at once. First let's unpack the weird $|$ and $\rangle$ . Their job is essentially just to remind us that we are talking about the vectors that represent qubit states labelled $0$ and $1$. This helps us distinguish them from things like the bit values ```0``` and ```1``` or the numbers 0 and 1. It is part of the bra-ket notation, introduced by Dirac.
If you are not familiar with vectors, you can essentially just think of them as lists of numbers which we manipulate using certain rules. If you are familiar with vectors from your high school physics classes, you'll know that these rules make vectors well-suited for describing quantities with a magnitude and a direction. For example, velocity of an object is described perfectly with a vector. However, the way we use vectors for quantum states is slightly different to this. So don't hold on too hard to your previous intuition. It's time to do something new!
In the example above, we wrote the vector as a vertical list of numbers. We call these _column vectors_. In Dirac notation, they are also called _kets_.
Horizontal lists are called _row vectors_. In Dirac notation they are _bras_. They are represented with a $\langle$ and a $|$.
$$
\langle 0| = \begin{pmatrix} 1 & 0\end{pmatrix} \, \, \, \, \langle 1| =\begin{pmatrix} 0 & 1 \end{pmatrix}.
$$
The rules on how to manipulate vectors define what it means to add or multiply them. For example, to add two vectors we need them to be the same type (either both column vectors, or both row vectors) and the same length. Then we add each element in one list to the corresponding element in the other. For a couple of arbitrary vectors that we'll call $a$ and $b$, this works as follows.
$$
\begin{pmatrix} a_0 \\\\ a_1 \end{pmatrix} +\begin{pmatrix} b_0 \\\\ b_1 \end{pmatrix}=\begin{pmatrix} a_0+b_0 \\\\ a_1+b_1 \end{pmatrix}.
$$
To multiply a vector by a number, we simply multiply every element in the list by that number:
$$
x \times\begin{pmatrix} a_0 \\\\ a_1 \end{pmatrix} = \begin{pmatrix} x \times a_0 \\\\ x \times a_1 \end{pmatrix}
$$
Multiplying a vector with another vector is a bit more tricky, since there are multiple ways we can do it. One is called the 'inner product', and works as follows.
$$
\begin{pmatrix} a_0 & a_1 \end{pmatrix} \begin{pmatrix} b_0 \\\\ b_1 \end{pmatrix}= a_0~b_0 + a_1~b_1.
$$
Note that the right hand side of this equation contains only normal numbers being multiplied and added in a normal way. The inner product of two vectors therefore yields just a number. As we'll see, we can interpret this as a measure of how similar the vectors are.
The inner product requires the first vector to be a bra and the second to be a ket. In fact, this is where their names come from. Dirac wanted to write the inner product as something like $\langle a | b \rangle$, which looks like the names of the vectors enclosed in brackets. Then he worked backwards to split the _bracket_ into a _bra_ and a _ket_.
If you try out the inner product on the vectors we already know, you'll find
$$
\langle 0 | 0\rangle = \langle 1 | 1\rangle = 1,\\\\
\langle 0 | 1\rangle = \langle 1 | 0\rangle = 0.
$$
Here we are using a concise way of writing the inner products where, for example, $\langle 0 | 1 \rangle$ is the inner product of $\langle 0 |$ with $| 1 \rangle$. The top line shows us that the inner product of these states with themselves always gives a 1. When done with two orthogonal states, as on the bottom line, we get the outcome 0. These two properties will come in handy later on.
### The x basis - part 1
So far we've looked at states for which the z measurement has a certain outcome. But there are also states for which the outcome of a z measurement is equally likely to be `0` or `1`. What might these look like in the language of vectors?
A good place to start would be something like $|0\rangle + |1\rangle$ , since this includes both $|0\rangle$ and $|1\rangle$ with no particular bias towards either. But let's hedge our bets a little and multiply it by some number $x$ .
$$
x ~ (|0\rangle + |1\rangle) = \begin{pmatrix} x \\\\ x \end{pmatrix}
$$
We can choose the value of $x$ to make sure that the state plays nicely in our calculations. For example, think about the inner product,
$$
\begin{pmatrix} x & x \end{pmatrix} \times \begin{pmatrix} x \\\\ x \end{pmatrix}= 2x^2.
$$
We can get any value for the inner product that we want, just by choosing the appropriate value of $x$.
As mentioned earlier, we are going to use the inner product as a measure of how similar two vectors are. With this interpretation in mind, it is natural to require that the inner product of any state with itself gives the value $1$. This is already achieved for the inner products of $|0\rangle$ and $|1\rangle$ with themselves, so let's make it true for all other states too.
This condition is known as the normalization condition. In this case, it means that $x=\frac{1}{\sqrt{2}}$. Now we know what our new state is, so here's a few ways of writing it down.
$$
\begin{pmatrix} \frac{1}{\sqrt{2}} \\\\ \frac{1}{\sqrt{2}} \end{pmatrix} = \frac{1}{\sqrt{2}}\begin{pmatrix} 1 \\\\ 1 \end{pmatrix} = \frac{ |0\rangle + |1\rangle}{\sqrt{2}}
$$
This state is essentially just $|0\rangle$ and $|1\rangle$ added together and then normalized, so we will give it a name to reflect that origin. We call it $|+\rangle$ .
### The Born rule
Now we've got three states that we can write down as vectors. We can also calculate inner products for them. For example, the inner product of each with $\langle 0 |$ is
$$
\langle 0 | 0\rangle = 1 \\\\ \langle 0 | 1\rangle = 0 \\\\ \, \, \, \, \langle 0 | +\rangle = \frac{1}{\sqrt{2}}.
$$
We also know the probabilities of getting various outcomes from a z measurement for these states. For example, let's use $p^z_0$ to denote the probability of the result `0` for a z measurement. The values this has for our three states are
$$
p_0^z( | 0\rangle) = 1,\\\\ p_0^z( | 1\rangle) = 0, \\\\ p_0^z( | +\rangle) = \frac{1}{2}.
$$
As you might have noticed, there's a lot of similarity between the numbers we get from the inner products and those we get for the probabilities. Specifically, the three probabilities can all be written as the square of the inner products:
$$
p_0^z(|a\rangle) = (~\langle0|a\rangle~)^2.
$$
Here $|a\rangle$ represents any generic qubit state.
This property doesn't just hold for the `0` outcome. If we compare the inner products with $\langle 1 |$ with the probabilities of the `1` outcome, we find a similar relation.
$$
\\\\
p_1^z(|a\rangle) = (~\langle1|a\rangle~)^2.
$$
The same also holds true for other types of measurement. All probabilities in quantum mechanics can be expressed in this way. It is known as the *Born rule*.
### Global and relative phases
Vectors are how we use math to represent the state of a qubit. With them we can calculate the probabilities of all the possible things that could ever be measured. These probabilities are essentially all that is physically relevant about a qubit. It is by measuring them that we can determine or verify what state our qubits are in. Any aspect of the state that doesn't affect the probabilities is therefore just a mathematical curiosity.
Let's find an example. Consider a state that looks like this:
$$
|\tilde 0\rangle = \begin{pmatrix} -1 \\\\ 0 \end{pmatrix} = -|0\rangle.
$$
This is equivalent to multiplying the state $|0\rangle$ by $-1$. It means that every inner product we could calculate with $|\tilde0\rangle$ is the same as for $|0\rangle$, but multiplied by $-1$.
$$
\langle a|\tilde 0\rangle = -\langle a| 0\rangle
$$
As you probably know, any negative number squares to the same value as its positive counterpart: $(-x)^2 =x^2$.
Since we square inner products to get probabilities, this means that any probability we could ever calculate for $|\tilde0\rangle$ will give us the same value as for $|0\rangle$. If the probabilities of everything are the same, there is no observable difference between $|\tilde0\rangle$ and $|0\rangle$; they are just different ways of representing the same state.
This is known as the irrelevance of the global phase. Quite simply, this means that multiplying the whole of a quantum state by $-1$ gives us a state that will look different mathematically, but which is actually completely equivalent physically.
The same is not true if the phase is *relative* rather than *global*. This would mean multiplying only part of the state by $-1$ , for example:
$$
\begin{pmatrix} a_0 \\\\ a_1 \end{pmatrix} \rightarrow \begin{pmatrix} a_0 \\\\ -a_1 \end{pmatrix}.
$$
Doing this with the $|+\rangle$ state gives us a new state. We'll call it $|-\rangle$.
$$
|-\rangle = \frac{1}{\sqrt{2}}\begin{pmatrix} 1 \\\\ -1 \end{pmatrix} = \frac{ |0\rangle - |1\rangle}{\sqrt{2}}
$$
The values $p_0^z$ and $p_1^z$ for $|-\rangle$ are the same as for $|+\rangle$. These two states are thus indistinguishable when we make only z measurements. But there are other ways to distinguish them. To see how, consider the inner product of $|+\rangle$ and $|-\rangle$.
$$
\langle-|+\rangle = \langle+|-\rangle = 0
$$
The inner product is 0, just as it is for $|0\rangle$ and $|1\rangle$. This means that the $|+\rangle$ and $|-\rangle$ states are orthogonal: they represent a pair of mutually exclusive possible ways for a qubit to be a qubit.
### The x basis - part 2
Whenever we find a pair of orthogonal qubit states, we can use it to define a new kind of measurement.
First, let's apply this to the case we know well: the z measurement. This asks a qubit whether it is $|0\rangle$ or $|1\rangle$. If it is $|0\rangle$, we get the result `0`. For $|1\rangle$ we get `1`. Anything else, such as $|+\rangle$, is treated as a superposition of the two.
$$
|+\rangle = \frac{|0\rangle+|1\rangle}{\sqrt{2}}.
$$
For a superposition, the qubit needs to randomly choose between the two possibilities according to the Born rule.
We can similarly define a measurement based on $|+\rangle$ and $|-\rangle$. This asks a qubit whether it is $|+\rangle$ or $|-\rangle$. If it is $|+\rangle$, we get the result `0`. For $|-\rangle$ we get `1`. Anything else is treated as a superposition of the two. This includes the states $|0\rangle$ and $|1\rangle$, which we can write as
$$
|0\rangle = \frac{|+\rangle+|-\rangle}{\sqrt{2}}, \, \, \, \, |1\rangle = \frac{|+\rangle-|-\rangle}{\sqrt{2}}.
$$
For these, and any other superpositions of $|+\rangle$ and $|-\rangle$, the qubit chooses its outcome randomly with probabilities
$$
p_0^x(|a\rangle) = (~\langle+|a\rangle~)^2,\\\\
p_1^x(|a\rangle) = (~\langle-|a\rangle~)^2.
$$
This is the x measurement.
### The conservation of certainty
Qubits in quantum circuits always start out in the state $|0\rangle$. By applying different operations, we can make them explore other states.
Try this out yourself using a single qubit, creating circuits using operations from the following list, and then doing the x and z measurements in the way described at the top of the page.
```
# single-qubit playground circuit: apply any combination of these gates,
# then append measure_z or measure_x (defined above) to sample outcomes
qc = QuantumCircuit(1)
qc.h(0) # the hadamard
qc.x(0) # x gate
qc.y(0) # y gate
qc.z(0) # z gate

# for the following, replace theta by any number
theta = 3.14159/4
qc.ry(theta,0); # y axis rotation
```
You'll find examples where the z measurement gives a certain result, but the x is completely random. You'll also find examples where the opposite is true. Furthermore, there are many examples where both are partially random. With enough experimentation, you might even uncover the rule that underlies this behavior:
$$
(p^z_0-p^z_1)^2 + (p^x_0-p^x_1)^2 = 1.
$$
This is a version of Heisenberg's famous uncertainty principle. The $(p^z_0-p^z_1)^2$ term measures how certain the qubit is about the outcome of a z measurement. The $(p^x_0-p^x_1)^2$ term measures the same for the x measurement. Their sum is the total certainty of the two combined. Given that this total always takes the same value, we find that the amount of information a qubit can be certain about is a limited and conserved resource.
Here is a program to calculate this total certainty. As you should see, whatever gates from the above list you choose to put in `qc`, the total certainty comes out as $1$ (or as near as possible given statistical noise).
```
shots = 2**14 # number of samples used for statistics
# Accumulates (p_0 - p_1)^2 over the z and x measurement bases.
# NOTE(review): despite its name, this variable holds the total *certainty*
# defined in the text above; kept for consistency with the printout.
uncertainty = 0
for measure_circuit in [measure_z, measure_x]:
    # run the circuit with the selected measurement appended and get the
    # number of samples that output each bit value
    counts = execute(qc+measure_circuit,Aer.get_backend('qasm_simulator'),shots=shots).result().get_counts()
    # calculate the probabilities for each bit value (missing key => never seen)
    probs = {}
    for output in ['0','1']:
        if output in counts:
            probs[output] = counts[output]/shots
        else:
            probs[output] = 0
    # (p_0 - p_1)^2 is this basis' contribution to the conserved total
    uncertainty += ( probs['0'] - probs['1'] )**2
# print the total; should be ~1 up to statistical noise for gates listed above
print('The total uncertainty is',uncertainty )
```
Now we have found this rule, let's try to break it! Then we can hope to get a deeper understanding of what is going on. We can do this by simply implementing the operation below, and then recalculating the total uncertainty.
```
# for the following, replace theta by any number (an angle in radians)
theta = 3.14159/2
qc.rx(theta,0); # x axis rotation: moves certainty out of the z/x pair (into y)
```
For a circuit with a single `rx` with $\theta=\pi/2$, we will find that $(p^z_0-p^z_1)^2 + (p^x_0-p^x_1)^2=0$. This operation seems to have reduced our total certainty to zero.
All is not lost, though. We simply need to perform another identical `rx` gate to our circuit to go back to obeying $(p^z_0-p^z_1)^2 + (p^x_0-p^x_1)^2=1$. This shows that the operation does not destroy our certainty; it simply moves it somewhere else and then back again. So let's find that somewhere else.
### The y basis - part 1
There are infinitely many ways to measure a qubit, but the z and x measurements have a special relationship with each other. We say that they are *mutually unbiased*. This simply means that certainty for one implies complete randomness for the other.
At the end of the last section, it seemed that we were missing a piece of the puzzle. We need another type of measurement to plug the gap in our total certainty, and it makes sense to look for one that is also mutually unbiased with x and z.
The first step is to find a state that seems random to both x and z measurements. Let's call it $|\circlearrowleft\rangle$, for no apparent reason.
$$
|\circlearrowleft\rangle = c_0 | 0 \rangle + c_1 | 1 \rangle
$$
Now the job is to find the right values for $c_0$ and $c_1$. You could try to do this with standard positive and negative numbers, but you'll never be able to find a state that is completely random for both x and z measurements. To achieve this, we need to use complex numbers.
### Complex numbers
Hopefully you've come across complex numbers before, but here is a quick reminder.
Normal numbers, such as the ones we use for counting bananas, are known as *real numbers*. We cannot solve all possible equations using only real numbers. For example, there is no real number that serves as the square root of $-1$. To deal with this issue, we need more numbers, which we call *complex numbers*.
To define complex numbers we start by accepting the fact that $-1$ has a square root, and that its name is $i$. Any complex number can then be written
$$
x = x_r + i~x_i .
$$
Here $x_r$ and $x_i$ are both normal numbers \(positive or negative\), where $x_r$ is known as the real part and $x_i$ as the imaginary part.
For every complex number $x$ there is a corresponding complex conjugate $x^*$
$$
x^* = x_r - i~x_i .
$$
Multiplying $x$ by $x^*$ gives us a real number. It's most useful to write this as
$$
|x| = \sqrt{x~x^*}.
$$
Here $|x|$ is known as the magnitude of $x$ \(or, equivalently, of $x^*$ \).
If we are going to allow the numbers in our quantum states to be complex, we'll need to upgrade some of our equations.
First, we need to ensure that the inner product of a state with itself is always 1. To do this, the bra and ket versions of the same state must be defined as follows:
$$
|a\rangle = \begin{pmatrix} a_0 \\\\ a_1 \end{pmatrix}, ~~~ \langle a| = \begin{pmatrix} a_0^* & a_1^* \end{pmatrix}.
$$
Then we just need a small change to the Born rule, where we square the magnitudes of inner products, rather than just the inner products themselves.
$$
p_0^z(|a\rangle) = |~\langle0|a\rangle~|^2,\\\\
p_1^z(|a\rangle) = |~\langle1|a\rangle~|^2,\\\\
p_0^x(|a\rangle) = |~\langle+|a\rangle~|^2,\\\\
p_1^x(|a\rangle) = |~\langle-|a\rangle~|^2.
$$
The irrelevance of the global phase also needs an upgrade. Previously, we only talked about multiplying by -1. In fact, we can multiply a state by any complex number whose magnitude is 1. This will give us a state that will look different, but which is actually completely equivalent. This includes multiplying by $i$, $-i$ or infinitely many other possibilities.
### The y basis - part 2
Now that we have complex numbers, we can define the following pair of states.
$$
|\circlearrowright\rangle = \frac{ | 0 \rangle + i | 1 \rangle}{\sqrt{2}}, ~~~~ |\circlearrowleft\rangle = \frac{ | 0 \rangle -i | 1 \rangle}{\sqrt{2}}
$$
You can verify yourself that they both give random outputs for x and z measurements. They are also orthogonal to each other. They therefore define a new measurement, and that basis is mutually unbiased with x and z. This is the third and final fundamental measurement for a single qubit. We call it the y measurement, and can implement it with
```
# y measurement of qubit 0: S-dagger followed by H rotates the y basis onto
# the z basis, so a standard measurement then distinguishes the y eigenstates.
measure_y = QuantumCircuit(1,1)
measure_y.sdg(0)
measure_y.h(0)
measure_y.measure(0,0);
```
With the x, y and z measurements, we now have everything covered. Whatever operations we apply, a single isolated qubit will always obey
$$
(p^z_0-p^z_1)^2 + (p^y_0-p^y_1)^2 + (p^x_0-p^x_1)^2 = 1.
$$
To see this, we can incorporate the y measurement into our measure of total certainty.
```
shots = 2**14 # number of samples used for statistics
# Same as before, but now summing over all three mutually unbiased bases
# (z, x and y); the total should always be ~1 for a single isolated qubit.
uncertainty = 0
for measure_circuit in [measure_z, measure_x, measure_y]:
    # run the circuit with the selected measurement appended and get the
    # number of samples that output each bit value
    counts = execute(qc+measure_circuit,Aer.get_backend('qasm_simulator'),shots=shots).result().get_counts()
    # calculate the probabilities for each bit value (missing key => never seen)
    probs = {}
    for output in ['0','1']:
        if output in counts:
            probs[output] = counts[output]/shots
        else:
            probs[output] = 0
    uncertainty += ( probs['0'] - probs['1'] )**2
# print the total certainty over the three bases
print('The total uncertainty is',uncertainty )
```
For more than one qubit, this relation will need another upgrade. This is because the qubits can spend their limited certainty on creating correlations that can only be detected when multiple qubits are measured. The fact that certainty is conserved remains true, but it can only be seen when looking at all the qubits together.
Before we move on to entanglement, there is more to explore about just a single qubit. As we'll see in the next section, the conservation of certainty leads to a particularly useful way of visualizing single-qubit states and gates.
```
import qiskit
# Record the Qiskit component versions this notebook was executed with.
qiskit.__qiskit_version__
```
| github_jupyter |
# Archivos y Bases de datos
La idea de este taller es manipular archivos (leerlos, parsearlos y escribirlos) y hacer lo mismo con bases de datos estructuradas.
## Ejercicio 1
Baje el archivo de "All associations with added ontology annotations" del GWAS Catalog.
+ https://www.ebi.ac.uk/gwas/docs/file-downloads
Describa las columnas del archivo (_que información estamos mirando? Para qué sirve? Por qué la hicieron?_)
```
import pandas as pd

# Load the GWAS Catalog association dump (tab-separated).
df = pd.read_csv('datoss.tsv', sep='\t')
# Preview a single row. The original `df [1:1]` selected rows 1..1, which is
# an empty slice, so nothing was actually displayed.
df[0:1]
```
La base de datos se compone de 37 columnas (Variables). En términos generales los datos exponen la asociación genética de un tipo de patología (eg. Cáncer, Asma) y la información "detallada" sobre el estudio que determinó dicha relación.
Esta base de datos permite almacenar de forma ordenada la relación entre rasgos genéticos y fenotípicos. Los estudios contenidos en esta base de datos tienen como intención descubrir la clave para prevenir, diagnosticar y tratar una enfermedad. Esto, también es conocido como estudio de asociación de genoma completo.
Qué Entidades (tablas) puede definir?
1. Journal
2. Platform
3. Study
4. Publicacion
5. Enfermedad
6. Loci
7. Enfermedad_Loci (Tabla intermedia)
Cree la base de datos (copie el código SQL que se usó)
```
-- Dimension table: one row per distinct journal name.
CREATE TABLE journal
(
    id_JOURNAL int auto_increment PRIMARY KEY,
    namejournal varchar (300)
);
-- Dimension table: one row per genotyping platform name
-- (the "PLATFORM [SNPS PASSING QC]" column of the raw file).
CREATE TABLE platform
(
    id_platform INT auto_increment NOT NULL PRIMARY KEY,
    PLATFORMname VARCHAR(300)
);
-- One row per study with its association statistics.
CREATE TABLE study
(
    id_STUDY int auto_increment PRIMARY KEY,
    STUDY text,
    -- GWAS Catalog sample-size fields are descriptive free text
    -- (e.g. "1,200 European ancestry individuals"), so text, not int.
    INITIAL_SAMPLE_SIZE text,
    REPLICATION_SAMPLE_SIZE text,
    id_platform int,
    foreign key (id_platform) references platform(id_platform),
    -- p-values, -log10(p) and OR/beta are fractional numbers; the original
    -- int columns would have truncated every p-value to 0.
    P_VALUE double,
    PVALUE_MLOG double,
    PVALUE_TEXT varchar (300),
    CI_text varchar (300),
    OR_BETA double,
    MAPPED_TRAIT varchar (300),
    MAPPED_TRAIT_URI varchar (300)
);
-- One row per publication; links a study to the journal it appeared in.
CREATE TABLE publicacion
(
    id_publicacion int auto_increment PRIMARY KEY,
    PUBMEDID varchar (300),
    FIRSTAUTHOR varchar (300),
    id_journal int,
    foreign key (id_JOURNAL) references journal(id_JOURNAL),
    LINK varchar (300),
    STUDY_ACCESSION varchar (300),
    id_STUDY int,
    foreign key (id_STUDY) references study(id_STUDY)
);
-- Dimension table: one row per disease/trait.
-- Column renamed from DISEASETRAITenfermedad to DISEASETRAIT so it matches
-- the loader and query cells, which all filter on "WHERE DISEASETRAIT = ...".
CREATE TABLE enfermedad
(
    id_enfermedad int auto_increment PRIMARY KEY,
    DISEASETRAIT VARCHAR(300)
);
-- One row per genomic locus; identified in practice by (CHR_ID, CHR_POS).
CREATE TABLE loci
(
    id_loci int auto_increment PRIMARY KEY,
    REGION text,
    CHR_ID text,
    CHR_POS text,
    REPORTED_GENE text,
    MAPPED_GENE text,
    UPSTREAM_GENE_ID text,
    DOWNSTREAM_GENE_ID text,
    SNP_GENE_IDS text,
    UPSTREAM_GENE_DISTANCE text,
    DOWNSTREAM_GENE_DISTANCE text,
    STRONGEST_SNP_RISK_ALLELE text,
    SNPS text,
    MERGED text,
    SNP_ID_CURRENT text,
    CONTEXT_ text,
    INTERGENIC text
);
-- Bridge table for the many-to-many disease <-> locus relation.
CREATE TABLE enfermedad_loci
(
    id_enfermedad int,
    id_loci int,
    PRIMARY KEY (id_enfermedad, id_loci),
    foreign key (id_enfermedad) references enfermedad(id_enfermedad),
    foreign key (id_loci) references loci(id_loci)
);
```
## Ejercicio 2
Lea el archivo y guarde la información en la base de datos en las tablas que se definieron en el __Ejercicio 1__.
```
#Leer el archivo: quick look at the parsed frame before loading it into MySQL
df.head(1)
import mysql.connector

# Connection parameters reused by every loader cell below.
hostname = '127.0.0.1'
username = 'root'
password = 'fnsQFJ14'
database = 'new_schema'

# NOTE(review): the original also opened a second connection (`cnx`) that was
# never used or closed; that leaked connection has been removed.

def doQuery( conn ) :
    """Smoke test: dump the platform table to verify connectivity and schema."""
    cur = conn.cursor()
    cur.execute( "select * from platform" )
    for id_nombre, nombre_plat in cur.fetchall() :
        print (id_nombre, nombre_plat)

myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
doQuery( myConnection )
myConnection.close()
# Guardar informacion de plataforma
def get_platformId(names_platf):
    """Return id_platform for `names_platf`, inserting the row on first use.

    Uses the module-level `myConnection`. Queries are parameterized so names
    containing quotes cannot break the SQL (the original %-interpolated the
    value, which was also injection-prone).
    """
    cur = myConnection.cursor()
    # NOTE(review): the original selected a non-existent column
    # (PLATFORM_SNPS_PASSplatformING_QC); the platform table defines PLATFORMname.
    cur.execute("SELECT id_platform FROM platform WHERE PLATFORMname = %s",
                (names_platf,))
    rows = cur.fetchall()
    if rows:
        return rows[0][0]
    # Not present yet: insert and return the auto-generated key.
    cur.execute("INSERT INTO platform VALUES (NULL, %s)", (names_platf,))
    myConnection.commit()
    return cur.lastrowid
# Load every platform name from the dataframe into the platform table.
hostname = '127.0.0.1'
username = 'root'
password = 'fnsQFJ14'
database = 'new_schema'
myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
# Row-by-row insert; get_platformId() de-duplicates, so repeated names are
# stored only once.
for index, row in df.iterrows():
    plat_name = row['PLATFORM [SNPS PASSING QC]']
    platform_id = get_platformId(plat_name)
    print()
myConnection.close()
# Guardar informacion de journal
def get_journalId(names_journal):
    """Return id_JOURNAL for `names_journal`, inserting the row on first use.

    Parameterized query (the original %-interpolated the value, which broke on
    journal names containing quotes and was injection-prone).
    """
    cur = myConnection.cursor()
    cur.execute("SELECT id_JOURNAL FROM journal WHERE namejournal = %s",
                (names_journal,))
    rows = cur.fetchall()
    if rows:
        return rows[0][0]
    cur.execute("INSERT INTO journal VALUES (NULL, %s)", (names_journal,))
    myConnection.commit()
    return cur.lastrowid
# Load every journal name from the dataframe into the journal table.
hostname = '127.0.0.1'
username = 'root'
password = 'fnsQFJ14'
database = 'new_schema'
myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
# get_journalId() de-duplicates, so each journal is stored once.
for index, row in df.iterrows():
    journal_name = row['JOURNAL']
    journal_id = get_journalId(journal_name)
    print()
myConnection.close()
# Guardar información de enfermedad
def get_enfermedadlId(names_enferm):
    """Return id_enfermedad for `names_enferm`, inserting the row on first use.

    Parameterized query; disease names with quotes no longer break the SQL.
    """
    cur = myConnection.cursor()
    cur.execute("SELECT id_enfermedad FROM enfermedad WHERE DISEASETRAIT = %s",
                (names_enferm,))
    rows = cur.fetchall()
    if rows:
        return rows[0][0]
    cur.execute("INSERT INTO enfermedad VALUES (NULL, %s)", (names_enferm,))
    myConnection.commit()
    return cur.lastrowid
# Load every disease/trait name from the dataframe into the enfermedad table.
hostname = '127.0.0.1'
username = 'root'
password = 'fnsQFJ14'
database = 'new_schema'
myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
# get_enfermedadlId() de-duplicates on the trait name.
for index, row in df.iterrows():
    enf_name = row['DISEASE/TRAIT']
    enferm_id = get_enfermedadlId(enf_name)
    print()
myConnection.close()
#Guardar información loci
def get_lociId(regionloci, chro, chrpos, repor, mappede, geneups, genedows, snp,
               upstr, downs, riskalle, snps, merged, snpid, contexts, intergenic):
    """Return id_loci for the (CHR_ID, CHR_POS) pair, inserting the full locus
    row when it is not present yet.

    A locus is identified by chromosome + position only; the remaining fields
    are stored verbatim. Uses the module-level `myConnection`. Queries are
    parameterized (the original %-interpolated 16 values, which broke on data
    containing quotes).
    """
    cur = myConnection.cursor()
    cur.execute("SELECT id_loci FROM loci WHERE CHR_ID = %s AND CHR_POS = %s",
                (chro, chrpos))
    rows = cur.fetchall()
    if rows:
        return rows[0][0]
    cur.execute(
        "INSERT INTO loci VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
        (regionloci, chro, chrpos, repor, mappede, geneups, genedows, snp,
         upstr, downs, riskalle, snps, merged, snpid, contexts, intergenic))
    myConnection.commit()
    return cur.lastrowid
# Load one loci row per dataframe record into the loci table.
hostname = '127.0.0.1'
username = 'root'
password = 'fnsQFJ14'
database = 'new_schema'
myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
# get_lociId() de-duplicates on (CHR_ID, CHR_POS).
for index, row in df.iterrows():
    # Column order must match the loci table definition (after its id column).
    loci = [row['REGION'], row['CHR_ID'], row['CHR_POS'],row['REPORTED GENE(S)'],row['MAPPED_GENE'],row['UPSTREAM_GENE_ID'],
            row['DOWNSTREAM_GENE_ID'], row['SNP_GENE_IDS'],row['UPSTREAM_GENE_DISTANCE'],row['DOWNSTREAM_GENE_DISTANCE'],
            row['STRONGEST SNP-RISK ALLELE'],row['SNPS'], row['MERGED'], row['SNP_ID_CURRENT'], row['CONTEXT'],
            row['INTERGENIC']]
    loci_id = get_lociId(loci[0], loci[1], loci[2], loci[3], loci[4], loci[5], loci[6], loci[7], loci[8], loci[9], loci[10],
                         loci[11], loci[12], loci[13], loci[14], loci[15])
    print()
myConnection.close()
#Tabla intermedia: Enfermedad_loci
# Populate the bridge table: one row per distinct (disease, locus) pair.
hostname = '127.0.0.1'
username = 'root'
password = 'fnsQFJ14'
database = 'new_schema'
myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
cur = myConnection.cursor()
for index, row in df.iterrows():
    enfermedadAux = row['DISEASE/TRAIT']
    # Parameterized lookups (the original %-interpolated the values).
    cur.execute("SELECT id_enfermedad FROM enfermedad WHERE DISEASETRAIT = %s",
                (enfermedadAux,))
    idenferAux = cur.fetchall()
    cur.execute("SELECT id_loci FROM loci WHERE CHR_ID = %s AND CHR_POS = %s",
                (row.CHR_ID, row.CHR_POS))
    idlociAux = cur.fetchall()
    # Robustness: skip rows whose disease or locus was never inserted instead
    # of crashing with IndexError (the original indexed [0][0] unconditionally).
    if not idenferAux or not idlociAux:
        continue
    cur.execute("SELECT * FROM enfermedad_loci WHERE id_loci = %s AND id_enfermedad = %s",
                (idlociAux[0][0], idenferAux[0][0]))
    idiguales = cur.fetchall()
    if not idiguales:
        cur.execute("INSERT INTO enfermedad_loci (id_enfermedad, id_loci) VALUES (%s, %s)",
                    (idenferAux[0][0], idlociAux[0][0]))
        myConnection.commit()
myConnection.close()
```
## Ejercicio 3
Realice de la base de datos una consulta que le responda una pregunta biológica
(e.g. qué genes estan relacionados con cuales enfermedades)
```
#¿Cuáles genes se encuentran relacionados con el cáncer de pulmón?
#Se responde a la anterior pregunta y se expone el cromosoma en el cuál se encuentra dicho gen.
# Relational-division query: loci linked to every disease named 'Lung cancer'.
hostname = '127.0.0.1'
username = 'root'
password = 'fnsQFJ14'
database = 'new_schema'

# NOTE(review): the original defined a doQuery() helper that only created a
# cursor and discarded it; that dead code has been removed.
myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
cur = myConnection.cursor()
cur.execute("""SELECT r.CHR_ID, r.REPORTED_GENE
               FROM loci r
               WHERE NOT EXISTS (SELECT * FROM enfermedad i
                                 WHERE DISEASETRAIT IN ('Lung cancer')
                                 AND NOT EXISTS
                                     (SELECT * FROM enfermedad_loci ri
                                      WHERE ri.id_loci = r.id_loci
                                      AND ri.id_enfermedad = i.id_enfermedad))
            """)
enferme = cur.fetchall()
print(enferme)
```
## Ejercicio 4
Guarde el resultado de la consulta anterior en un archivo csv
```
import pandas as pd

# Persist the query result rows to CSV, without index or header columns.
result_frame = pd.DataFrame(enferme)
result_frame.to_csv('output.csv', index=False, header=False)
```
| github_jupyter |
# [deplacy](https://koichiyasuoka.github.io/deplacy/)을 사용한 문법 분석
## [Camphr-Udify](https://camphr.readthedocs.io/en/latest/notes/udify.html)로 분석
```
# Install deplacy plus the Camphr-Udify model, then dependency-parse a Korean
# sample sentence and render/serve the tree.
!pip install deplacy camphr 'unofficial-udify>=0.3.0' en-udify@https://github.com/PKSHATechnology-Research/camphr_models/releases/download/0.7.0/en_udify-0.7.tar.gz
import pkg_resources,imp
imp.reload(pkg_resources)  # refresh entry points so spacy sees the just-installed model
import spacy
nlp=spacy.load("en_udify")
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
## [UDPipe 2](http://ufal.mff.cuni.cz/udpipe/2)로 분석
```
!pip install deplacy
# Parse via the LINDAT UDPipe 2 REST API (Korean GSD model); the API returns
# CoNLL-U text that deplacy renders directly.
def nlp(t):
    import urllib.request,urllib.parse,json
    with urllib.request.urlopen("https://lindat.mff.cuni.cz/services/udpipe/api/process?model=ko_gsd&tokenizer&tagger&parser&data="+urllib.parse.quote(t)) as r:
        return json.loads(r.read())["result"]
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
## [Trankit](https://github.com/nlp-uoregon/trankit)로 분석
```
!pip install deplacy trankit transformers
# Parse with Trankit's Korean pipeline (model downloads on first run).
import trankit
nlp=trankit.Pipeline("korean")
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
## [spaCy-jPTDP](https://github.com/KoichiYasuoka/spaCy-jPTDP)로 분석
```
!pip install deplacy spacy_jptdp
# Parse with spaCy-jPTDP's Korean GSD model.
import spacy_jptdp
nlp=spacy_jptdp.load("ko_gsd")
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
## [Turku-neural-parser-pipeline](https://turkunlp.org/Turku-neural-parser-pipeline/)로 분석
```
!pip install deplacy ufal.udpipe configargparse 'tensorflow<2' torch==0.4.1 torchtext==0.3.1 torchvision==0.2.1
!test -d Turku-neural-parser-pipeline || git clone --depth=1 https://github.com/TurkuNLP/Turku-neural-parser-pipeline
!cd Turku-neural-parser-pipeline && git submodule update --init --recursive && test -d models_ko_gsd || python fetch_models.py ko_gsd
import sys,subprocess
# Run the Turku pipeline as a subprocess: sentence on stdin, CoNLL-U on stdout.
nlp=lambda t:subprocess.run([sys.executable,"full_pipeline_stream.py","--gpu","-1","--conf","models_ko_gsd/pipelines.yaml"],cwd="Turku-neural-parser-pipeline",input=t,encoding="utf-8",stdout=subprocess.PIPE).stdout
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
## [spacy-udpipe](https://github.com/TakeLab/spacy-udpipe)로 분석
```
!pip install deplacy spacy-udpipe
# Parse locally with spacy-udpipe's Korean GSD model (downloaded on demand).
import spacy_udpipe
spacy_udpipe.download("ko-gsd")
nlp=spacy_udpipe.load("ko-gsd")
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
## [NLP-Cube](https://github.com/Adobe/NLP-Cube)로 분석
```
!pip install deplacy nlpcube
# Parse with Adobe NLP-Cube's Korean model.
from cube.api import Cube
nlp=Cube()
nlp.load("ko")
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
## [spaCy-COMBO](https://github.com/KoichiYasuoka/spaCy-COMBO)로 분석
```
!pip install deplacy spacy_combo
# Parse with spaCy-COMBO's Korean GSD model.
import spacy_combo
nlp=spacy_combo.load("ko_gsd")
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
## [Stanza](https://stanfordnlp.github.io/stanza)로 분석
```
!pip install deplacy stanza
# Parse with Stanford's Stanza Korean pipeline.
import stanza
stanza.download("ko")
nlp=stanza.Pipeline("ko")
doc=nlp("홍시 맛이 나서 홍시라 생각한다.")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# Alternative rendering via graphviz:
# import graphviz
# graphviz.Source(deplacy.dot(doc))
```
| github_jupyter |
# Lightweight Networks and MobileNet
We have seen that complex networks require significant computational resources, such as GPU, for training, and also for fast inference. However, it turns out that a model with a significantly smaller number of parameters in most cases can still be trained to perform reasonably well. In other words, an increase in the model complexity typically results in a small (non-proportional) increase in the model performance.
We have observed this in the beginning of the module when training MNIST digit classification. The accuracy of the simple dense model was not significantly worse than that of a powerful CNN. Increasing the number of CNN layers and/or the number of neurons in the classifier allowed us to gain a few percent of accuracy at most.
This leads us to the idea that we can experiment with Lightweight network architectures in order to train faster models. This is especially important if we want to be able to execute our models on mobile devices.
This module will rely on the Cats and Dogs dataset that we have downloaded in the previous unit. First we will make sure that the dataset is available.
```
import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
from torchinfo import summary
import os
from pytorchcv import train, display_dataset, train_long, load_cats_dogs_dataset, validate, common_transform

# Fetch the Cats & Dogs archive once; skip the download when it is cached.
if not os.path.exists('data/kagglecatsanddogs_3367a.zip'):
    !wget -P data -q http://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip

dataset, train_loader, test_loader = load_cats_dogs_dataset()
```
## MobileNet
In the previous unit, we have seen **ResNet** architecture for image classification. More lightweight analog of ResNet is **MobileNet**, which uses so-called *Inverted Residual Blocks*. Let's load pre-trained mobilenet and see how it works:
```
# Load an ImageNet-pretrained MobileNetV2 from torch hub and switch it to
# inference mode before probing it.
model = torch.hub.load('pytorch/vision:v0.6.0', 'mobilenet_v2', pretrained=True)
model.eval()
print(model)
```
Let's apply the model to our dataset and make sure that it works.
```
# Sanity check: classify the first dataset image with the pretrained network
# and report the index of the highest-scoring ImageNet class.
first_image, _ = dataset[0]
sample_image = first_image.unsqueeze(0)
res = model(sample_image)
print(res[0].argmax())
```
**Exercise:** Compare the number of parameters in MobileNet and full-scale ResNet model.
## Using MobileNet for Transfer Learning
Now let's perform the same transfer learning process as in previous unit, but using MobileNet. First of all, let's freeze all parameters of the model:
```
# Freeze the pretrained backbone so only the new classifier head will train.
for param in model.parameters():
    param.requires_grad = False
```
Then, replace the final classifier. We also transfer the model to our default training device (GPU or CPU):
```
# Train on GPU when available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Replace the 1000-class ImageNet head with a 2-class (cat/dog) linear layer;
# 1280 is MobileNetV2's final feature width.
model.classifier = nn.Linear(1280,2)
model = model.to(device)
summary(model,input_size=(1,3,244,244))  # NOTE(review): 244 looks like a typo for the usual 224 — confirm
```
Now let's do the actual training:
```
# Fine-tune the new classifier head for one epoch on cats vs. dogs.
train_long(model,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90)
```
## Takeaway
Notice that MobileNet results in almost the same accuracy as VGG-16, and just slightly lower than full-scale ResNet.
The main advantage of small models, such as MobileNet or ResNet-18 is that they can be used on mobile devices. [Here](https://pytorch.org/mobile/android/) is official example of using ResNet-18 on Android device, and [here](https://heartbeat.fritz.ai/pytorch-mobile-image-classification-on-android-5c0cfb774c5b) is similar example using MobileNet.
| github_jupyter |
## This notebook Contains:
- Taking scraped input(HTML formatted code)
- Cleaning, Data Preprocessing and Feature Engineering on the data set
- Importing the Cleaned CSV File
```
# importing libraries
import pandas as pd
import os
from bs4 import BeautifulSoup
import re
# Reading the list of files inside the HTML_FILES folder
allfileslist = os.listdir("../../Data/HTML_FILES/")
# Concatenating all the files in the HTML_FILES folder into one dataframe
combined_csv = pd.concat( [ pd.read_csv("../../Data/HTML_FILES/"+f) for f in allfileslist ] )
# count of accept and reject
combined_csv.status.value_counts()
# Normalize the misspelled 'acccept' label
combined_csv.loc[combined_csv['status']=='acccept',"status"]="accept"
combined_csv.status.value_counts()
#filtering out empty records ("[]" means the scraper found no profile links)
combined_csv=combined_csv.loc[~(combined_csv['links']=="[]"),:]
#removing the stray index column written by the scraper, then re-indexing
combined_csv.drop(columns='Unnamed: 0',inplace=True)
combined_csv.reset_index(drop=True,inplace=True)
# Changing university name in proper naming convention
combined_csv.loc[combined_csv.loc[:,'university_name']=="illinois_institute_of_technology_accept","university_name"]="illinois_institute_of_technology"
# Changing university name in proper naming convention
combined_csv.loc[combined_csv.loc[:,'university_name']=="university of california, irvine","university_name"]="university_of_california_irvine"
# Changing university name in proper naming convention
combined_csv.loc[combined_csv.loc[:,'university_name']=="clemson_university_accept","university_name"]="clemson_university"
combined_csv.loc[combined_csv.loc[:,'university_name']=="clemson_university_reject","university_name"]="clemson_university"
# Changing university name in proper naming convention
combined_csv.loc[combined_csv.loc[:,'university_name']=="university_of_texas_dallas_accept","university_name"]="university_of_texas_dallas"
combined_csv.loc[combined_csv.loc[:,'university_name']=="university_of_texas_dallas_reject","university_name"]="university_of_texas_dallas"
# Accept and Reject for every university with percentage of accept and reject
combined_csv.groupby(by=["university_name"])['status'].value_counts(normalize=True)
# shape of the dataset
combined_csv.shape
#unwrapping stored html pages and extracting features from html tags
html_pages = combined_csv.links.tolist()
temp = []
# For each page, pull the h4 text of every profile <div class="col-sm-4 col-xs-4">.
for i in html_pages:
    # Explicit parser keeps results stable across machines and silences the
    # bs4 "no parser was explicitly specified" warning.
    soup = BeautifulSoup(i, 'html.parser')
    a = soup.find_all('div', class_='col-sm-4 col-xs-4')
    temp_inside = []
    for x in a:
        k = x.h4.text
        # Fixed: the original filtered with `len(j) is not 0`, an identity
        # comparison on an int; keep only non-empty lines via truthiness.
        t = [j for j in k.strip().split("\n") if j]
        temp_inside.append(t)
    temp.append(temp_inside)
temp[0:1]
# getting all the profile data in nested lists and flattening it: one flat
# list of fields per scraped page.
# NOTE(review): `all` shadows the builtin all(); the name is kept because the
# DataFrame-building cell below reads it. (The original additionally shadowed
# the builtin `list` with a loop-local variable — removed here.)
all = [[field for group in each for field in group] for each in temp]
#verifying that we unpacked all collected html pages correctly
len(all)
all[0]
#we will make a new dataframe with extracted information from html pages and its corresponding university name and status
university_list=combined_csv.university_name.tolist()
status_list=combined_csv.status.tolist()
combined_df = pd.DataFrame(all)
combined_df['university_name']=university_list
combined_df['status']=status_list
#naming our features; the 'droping*' columns are page-layout artifacts and are dropped below
list_columns = ['gre_score','droping', 'gre_score_quant','gre_score_verbal','test_score_toefl','droping_1', 'undergraduation_score','work_ex', 'papers_published','droping_3','university_name','status']
combined_df.columns = list_columns
combined_df.drop(columns = ['droping','droping_1','droping_3'], inplace=True)
# Null in columns
combined_df.isna().sum()
#filling work experience and papers_published with zero when no value was given
combined_df=combined_df.fillna(0)
combined_df.head()
```
<b>Data Pre processing and Feature Engineering</b>
- Removing Null values from columns
- Removing noise data, Unformatted Text and Inconsistent Data
- Conversion of percentage and 10-point CGPA scores to the 4-point scale
- Toefl and IELTS score to the same scale according to the information available on ETS Official website (https://www.ets.org/toefl/institutions/scores/compare/)
- Including Ranking of University as a column
- Changed paper Published containing column values as NoneInternational/National/Local
```
# Function for extracting the numeric part of a scraped score field
def replace_special_chars(i):
    """Extract every digit run from `i` and concatenate them.

    Despite the historical name, this keeps ONLY digits: "GRE 320" -> "320",
    "320/340" -> "320340". Non-string inputs are str()-converted first, so
    None/NaN (no digits in their repr) yield the empty string.
    """
    return ''.join(re.findall(r'\d+', str(i)))
# calling this function for various columns: digit-extract the scraped scores
combined_df['gre_score']=combined_df.gre_score.apply(replace_special_chars)
combined_df['gre_score_quant']=combined_df['gre_score_quant'].apply(replace_special_chars)
combined_df['test_score_toefl'] = combined_df['test_score_toefl'].apply(replace_special_chars)
combined_df['gre_score_verbal'] = combined_df['gre_score_verbal'].apply(replace_special_chars)
combined_df['work_ex'] = combined_df['work_ex'].apply(replace_special_chars)
# strip unit labels so these columns can be parsed as numbers
combined_df["undergraduation_score"] = [x.replace('CGPA','') for x in combined_df["undergraduation_score"]]
combined_df["undergraduation_score"] = [x.replace('%','') for x in combined_df["undergraduation_score"]]
combined_df["papers_published"] = [str(x).replace('Tech Papers','') for x in combined_df["papers_published"]]
# data type for multiple columns
combined_df.dtypes
# empty work-experience string means no experience
combined_df.loc[combined_df['work_ex']=='','work_ex']=0
# collect undergraduation_score values that cannot be parsed as floats...
values=[]
for each in combined_df.undergraduation_score.unique():
    try:
        float(each)
    except:
        values.append(each)
# ...and drop the rows holding them
for each in values:
    combined_df=combined_df[combined_df.undergraduation_score!=each]
combined_df[['gre_score','gre_score_quant','gre_score_verbal','test_score_toefl','undergraduation_score','work_ex']]=combined_df[['gre_score','gre_score_quant','gre_score_verbal','test_score_toefl','undergraduation_score','work_ex']].apply(pd.to_numeric)
# drop rows where the TOEFL score could not be parsed at all
combined_df=combined_df.loc[~(combined_df.test_score_toefl.isna()),:]
combined_df.isna().sum()
combined_df.reset_index(drop=True,inplace=True)
# function to scale the cgpa on the scale of 4
# Assumes: values > 10 are percentages, mapped via score/20 - 1 (80% -> 3.0,
# 100% -> 4.0); values <= 10 are 10-point CGPAs, mapped via score/10*4.
# NOTE(review): a grade already on the 4-point scale would be mis-scaled by
# the <=10 branch (4.0 -> 1.6) — confirm the source data never uses it.
update_cgpa_score_scale_4 = []
for score in combined_df.undergraduation_score.tolist():
    s = 0
    try:
        score = float(score)
    except:
        score= 0  # unparseable -> 0 (rows were filtered above; this is a safety net)
    if score > 10:
        s = ((score)/20) - 1
        s = round(s,2)
        update_cgpa_score_scale_4.append(s)
    else:
        s = ((score)/10)*4
        s = round(s,2)
        update_cgpa_score_scale_4.append(s)
combined_df['undergraduation_score']=update_cgpa_score_scale_4
# Values < 9 are IELTS band scores; map them onto the TOEFL scale following
# the ETS score-comparison table referenced in the markdown above.
combined_df.loc[combined_df['test_score_toefl']<9,'test_score_toefl']=pd.cut(combined_df.loc[combined_df['test_score_toefl']<9,'test_score_toefl'], bins=[-1,0.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9], labels=[0,31,34,45,59,78,93,101,109,114,117,120])
combined_df.loc[combined_df['test_score_toefl']<9,'test_score_toefl'].value_counts()
```
##### working on the paper published column to assign values: International as 3, National as 2, Local as 1 and None as 0
```
combined_df.papers_published.unique()
#df_all_neu["papers_published"] = [x.replace('','0') for x in df_all_neu["papers_published"]]
combined_df["papers_published"] = [x.replace('None','0') for x in combined_df["papers_published"]]
combined_df["papers_published"] = [x.replace('NA','0') for x in combined_df["papers_published"]]
combined_df.papers_published.value_counts()
combined_df.loc[combined_df['papers_published'] == 'Local', 'papers_published'] = '1'
combined_df.loc[combined_df['papers_published'] == 'International', 'papers_published'] = '3'
combined_df.loc[combined_df['papers_published'] == 'National', 'papers_published'] = '2'
# Treat empty strings in papers_published as "no papers" ('0'),
# then convert the whole column to integers.
combined_df['papers_published'] = [
    '0' if entry == '' else entry
    for entry in combined_df.papers_published.tolist()
]
combined_df['papers_published'] = combined_df['papers_published'].astype(int)
combined_df.describe()
```
#### checking and removing incorrect record
Records with GRE quant/verbal below 130 (or total below 260) are invalid, since each GRE section is scored between 130 and 170, and are removed.
```
combined_df.loc[(combined_df['gre_score_quant'] <130) | (combined_df['gre_score_verbal'] < 130) | (combined_df['gre_score'] < 260),:]
combined_df = combined_df.loc[~((combined_df['gre_score_quant'] <130) | (combined_df['gre_score_verbal'] < 130) | (combined_df['gre_score'] < 260)),:]
# No null columns remaining
combined_df.isna().sum()
def replace_special_chars_university_name(i):
    """Normalize a university name for use as a key.

    Every run of non-alphabetic characters is collapsed to a single
    space, the result is lowercased, and the pieces are rejoined with
    underscores.  Note: a leading non-letter (e.g. a digit) yields a
    leading underscore, which the hard-coded college list relies on.
    """
    cleaned = re.sub('[^A-Za-z]+', ' ', str(i))
    pieces = cleaned.lower().split(' ')
    return '_'.join(pieces)
#replacing special characters and spaces in university name
combined_df.loc[:,"university_name"]=combined_df.university_name.apply(replace_special_chars_university_name)
required_colleges=combined_df.university_name.unique().tolist()
len(required_colleges)
required_colleges=['northeastern_university','illinois_institute_of_technology','michigan_technological_university','rochester_institute_of_technology','university_of_southern_california','north_carolina_state_university_raleigh','university_of_texas_arlington','university_of_texas_dallas','syracuse_university','clemson_university','new_york_university','indiana_university_bloomington','rutgers_university_new_brunswick', "---",'university_of_florida','carnegie_mellon_university','georgia_institiute_of_technology','university_of_colorado_boulder','university_of_north_carolina_at_charlotte','university_of_iowa','university_of_connecticut','worcester_polytechnic_institute','---','kansas_state_university','university_of_cincinnati','university_of_maryland_college_park','university_of_california_irvine','texas_a_m_university_college_station','state_university_of_new_york_at_stony_brook','george_mason_university','university_of_texas_austin']
# Assigining universities with their respective rankings in CS
required_colleges_ranking = [15,97,117,66,19,49,64,52,118,89,22,48,25,150,62,1,9,58,30, 71, 70,79, 76, 115, 130, 10, 23, 31, 35, 59,16]
dictionary_req_college = dict(zip(required_colleges, required_colleges_ranking))
dictionary_req_college
combined_df['ranking'] = combined_df['university_name']
combined_df['ranking'].replace(dictionary_req_college,inplace=True)
# no null values remaining
combined_df.isna().sum()
# cleaned datset
combined_df.head()
# describing the dataset
combined_df.describe()
# transferring CSV file
combined_df.reset_index(drop =True).to_csv('../../Data/clean_profile_data_all.csv',index=False)
```
| github_jupyter |
# AutoEncoders
---
The following code was created by Aymeric Damien. You can find some of his code <a href="https://github.com/aymericdamien">here</a>. We made some modifications so the datasets can be imported into Jupyter Notebooks.
Let's call our imports and make the MNIST data available to use.
```
#from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Import MINST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
```
Now, let's give the parameters that are going to be used by our NN.
```
learning_rate = 0.01
training_epochs = 20
batch_size = 256
display_step = 1
examples_to_show = 10
# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
```
Now we need to create our encoder. For this, we are going to use sigmoidal functions. Sigmoidal functions delivers great results with this type of network. This is due to having a good derivative that is well-suited to backpropagation. We can create our encoder using the sigmoidal function like this:
```
# Building the encoder
def encoder(x):
    """Map the input batch *x* to the latent code via two sigmoid layers."""
    # First hidden layer: sigmoid(x @ W1 + b1), 784 -> 256.
    hidden = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1'])
    )
    # Second hidden layer compresses further: 256 -> 128.
    code = tf.nn.sigmoid(
        tf.add(tf.matmul(hidden, weights['encoder_h2']), biases['encoder_b2'])
    )
    return code
```
And the decoder:
You can see that the layer_1 in the encoder is the layer_2 in the decoder and vice-versa.
```
# Building the decoder
def decoder(x):
    """Reconstruct the input from the latent code *x* (mirror of encoder)."""
    # First decoder layer expands the code: 128 -> 256.
    expanded = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1'])
    )
    # Second decoder layer restores the original dimensionality: 256 -> 784.
    reconstruction = tf.nn.sigmoid(
        tf.add(tf.matmul(expanded, weights['decoder_h2']), biases['decoder_b2'])
    )
    return reconstruction
```
Let's construct our model.
In the variable <code>cost</code> we have the loss function and in the <code>optimizer</code> variable we have our gradient used for backpropagation.
```
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Reconstructed Images
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
```
For training we will run for 20 epochs.
```
# Launch the graph
# Using InteractiveSession (more convenient while using Notebooks)
sess = tf.InteractiveSession()
sess.run(init)
total_batch = int(mnist.train.num_examples / batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
```
Now, let's apply encoder and decoder for our tests.
```
# Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
```
Let's simply visualize our graphs!
```
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
```
As you can see, the reconstructions were successful, although some noise was added to the images.
## Thanks for reading :)
Created by [Saeed Aghabozorgi](https://www.linkedin.com/in/saeedaghabozorgi/) and modified by [Tarun Kamboj](https://www.linkedin.com/in/kambojtarun/).
| github_jupyter |
# 4장 판다스 데이터프레임 Part1
## 4.2 데이터프레임 인덱스
```
from pandas import DataFrame
data = [
["037730", "3R", 1510, 7.36],
["036360", "3SOFT", 1790, 1.65],
["005670", "ACTS", 1185, 1.28]
]
columns = ["종목코드", "종목명", "현재가", "등락률"]
df = DataFrame(data=data, columns=columns)
df
from pandas import DataFrame
data = [
["037730", "3R", 1510, 7.36],
["036360", "3SOFT", 1790, 1.65],
["005670", "ACTS", 1185, 1.28]
]
columns = ["종목코드", "종목명", "현재가", "등락률"]
df = DataFrame(data=data, columns=columns)
df = df.set_index("종목코드")
df
from pandas import DataFrame
data = [
["037730", "3R", 1510, 7.36],
["036360", "3SOFT", 1790, 1.65],
["005670", "ACTS", 1185, 1.28]
]
columns = ["종목코드", "종목명", "현재가", "등락률"]
df = DataFrame(data=data, columns=columns)
df.set_index("종목코드", inplace=True)
df
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
df.index.name = "종목코드"
df
df.index
type(df.index)
```
## 4.3 데이터프레임 컬럼 인덱싱
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
print(df['현재가'])
s = df['현재가']
print(s.index)
print(s.values)
리스트 = ["현재가", "등락률"]
df[리스트]
df[['현재가']]
```
## 4.4 데이터프레임 로우 인덱싱
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
df
df.loc["037730"]
print(df.iloc[0])
print(df.iloc[-1])
df.loc[ ["037730", "036360"] ]
df.iloc[[0, 1]]
```
## 4.5 특정 값 가져오기
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
print(df.iloc[0])
print(df.loc['037730'])
# 행번호로 행 선택 후 시리즈 인덱싱
print(df.iloc[0].iloc[1]) # 시리즈 행번호
print(df.iloc[0].loc["현재가"]) # 시리즈 인덱스
print(df.iloc[0]["현재가"]) # 시리즈 인덱스
# 인덱스로 행 선택 후 시리즈 인덱싱
print(df.loc["037730"].iloc[1]) # 시리즈 행번호
print(df.loc["037730"].loc["현재가"]) # 시리즈 인덱스
print(df.loc["037730"]["현재가"]) # 시리즈 인덱스
s = df.iloc[0]
s
print(df.loc["037730", "현재가"])
print(df.iloc[0, 1])
df
print(df['현재가'].iloc[0])
print(df['현재가'].loc["037730"])
print(df['현재가']["037730"])
```
## 4.6 특정 범위 가져오기
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
print(df.loc[["037730", "036360"]])
print(df.iloc[[0, 1]])
df
df.loc[["037730", "036360"], ["종목명", "현재가"]]
df.iloc[ [0, 1], [0, 1] ]
```
## 4.7 데이터프레임 필터링
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
df
cond = df['현재가'] >= 1400
df.loc[cond]
cond = df['현재가'] >= 1400
df.loc[cond]["현재가"]
df.loc[cond, "현재가"]
cond = (df['현재가'] >= 1400) & (df['현재가'] < 1700)
df.loc[cond]
```
## 4.8 컬럼 추가하기
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
df
from pandas import Series
s = Series(data=[1600, 1600, 1600], index=df.index)
df['목표가'] = s
df
s
df["괴리율"] = (df["목표가"] - df["현재가"]) / df['현재가']
df
```
## 4.9 로우 추가하기
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
df
from pandas import Series
s = Series(data=["LG전자", 60000, 3.84], index=df.columns)
df.loc["066570"] = s
df
df.loc["066570"] = ["LG전자", 60000, 3.84]
df
s = Series(data=["LG전자", 60000, 3.84], index=df.columns, name="066570")
df.append(s)
```
## 4.10 컬럼/로우 삭제하기
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
new_df = df.drop("현재가", axis=1)
print(df)
print(new_df)
```
## 4.11 컬럼 레이블 변경
```
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
print(df.columns)
print(df.index)
df
df.columns = ['name', 'close', 'fluctuation']
df.index.name = 'code'
df
from pandas import DataFrame
data = [
["3R", 1510, 7.36],
["3SOFT", 1790, 1.65],
["ACTS", 1185, 1.28]
]
index = ["037730", "036360", "005760"]
columns = ["종목명", "현재가", "등락률"]
df = DataFrame(data=data, index=index, columns=columns)
df.rename(columns={'종목명': 'code'}, inplace=True)
df
```
## 4.12 데이터 타입 변경
```
from pandas import DataFrame
data = [
["1,000", "1,100", '1,510'],
["1,410", "1,420", '1,790'],
["850", "900", '1,185'],
]
columns = ["03/02", "03/03", "03/04"]
df = DataFrame(data=data, columns=columns)
df
def remove_comma(x):
    """Strip thousands separators from the string *x* and return an int."""
    digits = x.replace(',', '')
    return int(digits)
df['03/02'] = df['03/02'].map(remove_comma)
df['03/03'] = df['03/03'].map(remove_comma)
df
from pandas import DataFrame
data = [
["1,000", "1,100", '1,510'],
["1,410", "1,420", '1,790'],
["850", "900", '1,185'],
]
columns = ["03/02", "03/03", "03/04"]
df = DataFrame(data=data, columns=columns)
df
df = df.applymap(remove_comma)
df
df.dtypes
```
## 4.13 컬럼 문자열 다루기
```
from pandas import DataFrame
data = [
{"cd":"A060310", "nm":"3S", "close":"2,920"},
{"cd":"A095570", "nm":"AJ네트웍스", "close":"6,250"},
{"cd":"A006840", "nm":"AK홀딩스", "close":"29,700"},
{"cd":"A054620", "nm":"APS홀딩스", "close":"19,400"}
]
df = DataFrame(data=data)
df
df.dtypes
df['cd'] = df['cd'].str[1:]
df
df['close'] = df['close'].str.replace(',', '')
df
```
| github_jupyter |
```
import re
import os
from misc import *
import numpy as np
import pandas as pd
import pickle as pkl
import os.path as op
from tqdm import tqdm
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from scipy.stats import ks_2samp, median_test
from sklearn.metrics import roc_auc_score, roc_curve
from scipy.stats import spearmanr, pearsonr, ks_2samp, chisquare, levene
SHAM = "../data/sham_no stimulation/"
vlPFC = "../data/vlPFC_stimulation/"
enc_on = "../data/enc_on_new/"
enc_off = "../data/enc_off_new/"
eng_sham = "../data/Eng_sham/"
eng_vlPFC = "../data/Eng_vlPFC/"
def get_files(x):
    """Return full paths of the .xlsx files directly inside directory *x*.

    Only the top level of *x* is scanned (no recursion).  A file matches
    if '.xlsx' appears anywhere in its name, mirroring the original
    behaviour.
    """
    # Original code shadowed the parameter `x` inside a lambda and
    # materialized the entire recursive os.walk just to read the first
    # entry; next(os.walk(x))[2] gives the top directory's filenames only.
    _, _, filenames = next(os.walk(x))
    return [op.join(x, fname) for fname in filenames if ".xlsx" in fname]
sham_study = sum([Study.load_from_file(a) for a in get_files(SHAM)])
vlPFC_study = sum([Study.load_from_file(a) for a in get_files(vlPFC)])
eon_study = sum([Study.load_from_file(a) for a in get_files(enc_on)])
eoff_study = sum([Study.load_from_file(a) for a in get_files(enc_off)])
esham_study = sum([Study.load_from_file(a) for a in get_files(eng_sham)])
evlPFC_study = sum([Study.load_from_file(a) for a in get_files(eng_vlPFC)])
len(sham_study), len(vlPFC_study), len(eon_study), len(eoff_study)
sham_exclude = [
"tDCS3_101_test.xlsx", "tDCS3_103_test.xlsx", "tDCS3_132_test.xlsx",
"tDCS3_198_test.xlsx", "tDCS3_209_test.xlsx", "tDCS3_221_test.xlsx"
]
vl_exclude = ["tDCS3_170_test.xlsx", "tDCS3_207_test.xlsx"]
eon_exclude = [
"tDCS3_110_test (1).xlsx", "tDCS3_114_test (1).xlsx", "tDCS3_113_test (1).xlsx"
]
eoff_exclude = Study.load_from_file(op.join(enc_off, "tDCS3_129_test.xlsx"))
sham_exclude = sum([Study.load_from_file(op.join(SHAM, a)) for a in sham_exclude])
vl_exclude = sum([Study.load_from_file(op.join(vlPFC, a)) for a in vl_exclude])
eon_exclude = sum([Study.load_from_file(op.join(enc_on, a)) for a in eon_exclude])
sham_study = sham_study-sham_exclude
vlPFC_study = vlPFC_study-vl_exclude
eon_study = eon_study-eon_exclude
eoff_study = eoff_study-eoff_exclude
len(sham_study), len(vlPFC_study), len(eon_study), len(eoff_study)
fnames = [
"sham_study.pkl", "vlPFC_study.pkl", "eon_study.pkl", "eoff_study.pkl",
"esham_study.pkl", "evlPFC_study.pkl"
]
for a,b in zip([sham_study, vlPFC_study, eon_study, eoff_study, esham_study, evlPFC_study], fnames):
a.save(b)
```
### Figure 1
```
from pylab import rcParams
rcParams['figure.figsize'] = 16, 6
rcParams['font.size'] = 16
plt.subplot(1,2,1)
plt.title("A")
plt.violinplot(
[sham_study.compute_study_aucs(), vlPFC_study.compute_study_aucs()]#, eon_auc, eoff_auc]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Sham", "VLPFC online"])#, "DLPFC online", "DLPFC offline"])
#plt.grid(True)
plt.ylim(0,1)
plt.ylabel("AUROC")
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.subplot(1,2,2)
plt.violinplot(
[esham_study.compute_study_aucs(), evlPFC_study.compute_study_aucs()]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Sham", "VLPFC Online"])
#plt.grid(True)
plt.ylim(0,1)
plt.title("B")
#plt.ylabel("AUC")
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.show()
plt.subplot(1,2,1)
plt.boxplot(
[sham_study.compute_study_aucs(), vlPFC_study.compute_study_aucs()], notch=True#, eon_auc, eoff_auc]
)
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Sham", "VLPFC online"])#, "DLPFC online", "DLPFC offline"])
#plt.grid(True)
plt.ylim(0,1)
plt.ylabel("AUC")
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.subplot(1,2,2)
plt.boxplot(
[esham_study.compute_study_aucs(), evlPFC_study.compute_study_aucs()], notch=True
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Sham", "VLPFC Online"])
#plt.grid(True)
plt.ylim(0,1)
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.ylabel("AUC")
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.show()
median_test(sham_study.compute_study_aucs(), vlPFC_study.compute_study_aucs())
```
### Figure 2
```
from pylab import rcParams
rcParams['figure.figsize'] = 16, 10
ru_study = sham_study+vlPFC_study#+eoff_study+eon_study
en_study = esham_study+evlPFC_study
ru_study_all = ru_study+eoff_study+eon_study
ru_study_all.save("ru_study.pkl")
en_study.save("en_study.pkl")
ru_word_aucs = ru_study.compute_word_aucs()
en_word_aucs = en_study.compute_word_aucs()
rusham_word_aucs = sham_study.compute_word_aucs()
esham_word_aucs = esham_study.compute_word_aucs()
ruvlPFC_word_aucs = vlPFC_study.compute_word_aucs()
evlPFC_word_aucs = evlPFC_study.compute_word_aucs()
plt.subplot(1,3,1)
plt.title("A")
plt.violinplot(
[ru_word_aucs["AUROC"], en_word_aucs["AUROC"]]#, eon_auc, eoff_auc]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Russian", "English"])#, "DLPFC online", "DLPFC offline"])
#plt.grid(True)
plt.ylim(0,1)
plt.ylabel("AUROC")
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.subplot(1,3,2)
plt.title("B")
plt.violinplot(
[rusham_word_aucs["AUROC"], esham_word_aucs["AUROC"]]#, eon_auc, eoff_auc]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Russian", "English"])#, "DLPFC online", "DLPFC offline"])
#plt.grid(True)
plt.ylim(0,1)
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.subplot(1,3,3)
plt.title("C")
plt.violinplot(
[ruvlPFC_word_aucs["AUROC"], evlPFC_word_aucs["AUROC"]]#, eon_auc, eoff_auc]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Russian", "English"])#, "DLPFC online", "DLPFC offline"])
#plt.grid(True)
plt.ylim(0,1)
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.show()
plt.subplot(1,3,1)
plt.title("A")
plt.boxplot(
[ru_word_aucs["AUROC"], en_word_aucs["AUROC"]], notch=True#, eon_auc, eoff_auc]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Russian", "English"])#, "DLPFC online", "DLPFC offline"])
#plt.grid(True)
plt.ylim(0,1)
plt.ylabel("AUROC")
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.subplot(1,3,2)
plt.title("B")
plt.boxplot(
[rusham_word_aucs["AUROC"], esham_word_aucs["AUROC"]], notch=True#, eon_auc, eoff_auc]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Russian", "English"])#, "DLPFC online", "DLPFC offline"])
#plt.grid(True)
plt.ylim(0,1)
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.subplot(1,3,3)
plt.title("C")
plt.boxplot(
[ruvlPFC_word_aucs["AUROC"], evlPFC_word_aucs["AUROC"]], notch=True#, eon_auc, eoff_auc]
)
plt.xlim(0.5, 2.5)
plt.xticks([1,2], ["Russian", "English"])#, "DLPFC online", "DLPFC offline"])
#plt.grid(True)
plt.ylim(0,1)
plt.axhline(y=0.5, xmin=0, xmax=3, color="red")
plt.legend(
loc="best",
handles=[Patch(color="red", label="AUROC=0.5")]
)
plt.show()
median_test(ruvlPFC_word_aucs["AUROC"], evlPFC_word_aucs["AUROC"])
```
### Figure 3
```
ru_word_RTs = ru_study.get_word_RT()
en_word_RTs = en_study.get_word_RT()
ru_RTs = np.array(sum([ru_word_RTs[a] for a in ru_word_RTs], []))
en_RTs = np.array(sum([en_word_RTs[a] for a in en_word_RTs], []))
ru_RTs = ru_study.get_all_RT()
en_RTs = en_study.get_all_RT()
plt.hist(Study.clamp(ru_RTs, 2), bins=20, color=(1,0,0,0.5))
plt.hist(Study.clamp(en_RTs, 2), bins=20, color=(0,0,1,0.5))
plt.legend(
loc="best",
handles=[
Patch(color=(1,0,0,0.5), label="Russian"),
Patch(color=(0,0,1,0.5), label="English"),
]
)
plt.xlabel("Reaction time")
plt.ylabel("#words")
plt.show()
ks_2samp(
Study.clamp(ru_RTs, 2), Study.clamp(en_RTs, 2)
)
median_test(
Study.clamp(ru_RTs, 2), Study.clamp(en_RTs, 2)
)
median_test(
ru_RTs, en_RTs
)
```
| github_jupyter |
# Torch Hub Detection Inference Tutorial
In this tutorial you'll learn:
- how to load a pretrained detection model using Torch Hub
- run inference to detect actions in a demo video
## NOTE:
At the moment, this tutorial only works if run from a local clone, inside the directory `pytorchvideo/tutorials/video_detection_example`.
### Install and Import modules
If `torch`, `torchvision`, `cv2` and `pytorchvideo` are not installed, run the following cell:
```
try:
import torch
except ModuleNotFoundError:
!pip install torch torchvision
import os
import sys
import torch
try:
import cv2
except ModuleNotFoundError:
!pip install opencv-python
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
!pip install pytorchvideo
else:
need_pytorchvideo=False
try:
# Running notebook locally
import pytorchvideo
except ModuleNotFoundError:
need_pytorchvideo=True
if need_pytorchvideo:
# Install from GitHub
!pip install "git+https://github.com/facebookresearch/pytorchvideo.git"
from functools import partial
import numpy as np
import cv2
import torch
import detectron2
from detectron2.config import get_cfg
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
import pytorchvideo
from pytorchvideo.transforms.functional import (
uniform_temporal_subsample,
short_side_scale_with_boxes,
clip_boxes_to_image,
)
from torchvision.transforms._functional_video import normalize
from pytorchvideo.data.ava import AvaLabeledVideoFramePaths
from pytorchvideo.models.hub import slow_r50_detection # Another option is slowfast_r50_detection
from visualization import VideoVisualizer
```
## Load Model using Torch Hub API
PyTorchVideo provides several pretrained models through Torch Hub. Available models are described in [model zoo documentation.](https://github.com/facebookresearch/pytorchvideo/blob/main/docs/source/model_zoo.md)
Here we are selecting the slow_r50_detection model which was trained using a 4x16 setting on the Kinetics 400 dataset and
fine tuned on AVA V2.2 actions dataset.
NOTE: to run on GPU in Google Colab, in the menu bar select: Runtime -> Change runtime type -> Hardware Accelerator -> GPU
```
device = 'cuda' # or 'cpu'
video_model = slow_r50_detection(True) # Another option is slowfast_r50_detection
video_model = video_model.eval().to(device)
```
## Load an off-the-shelf Detectron2 object detector
We use the object detector to detect bounding boxes for the people.
These bounding boxes later feed into our video action detection model.
For more details, please refer to the Detectron2's object detection tutorials.
To install Detectron2, please follow the instructions mentioned [here](https://github.com/facebookresearch/detectron2/blob/main/INSTALL.md)
```
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.55 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
# This method takes in an image and generates the bounding boxes for people in the image.
def get_person_bboxes(inp_img, predictor):
    """Return bounding boxes for confidently detected people in an image.

    inp_img: image tensor; moved to CPU and converted to a numpy array
        before being fed to the Detectron2 predictor.
    predictor: a Detectron2 DefaultPredictor whose output contains an
        'instances' field with pred_boxes / scores / pred_classes.
    Returns a CPU tensor of [x1, y1, x2, y2] boxes for detections of
    class 0 with score > 0.75.
    """
    predictions = predictor(inp_img.cpu().detach().numpy())['instances'].to('cpu')
    boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
    scores = predictions.scores if predictions.has("scores") else None
    classes = np.array(predictions.pred_classes.tolist() if predictions.has("pred_classes") else None)
    # Keep only class 0 (person in the COCO label set used by this model)
    # above a 0.75 confidence threshold.
    predicted_boxes = boxes[np.logical_and(classes==0, scores>0.75 )].tensor.cpu() # only person
    return predicted_boxes
```
## Define the transformations for the input required by the model
Before passing the video and bounding boxes into the model we need to apply some input transforms and sample a clip of the correct frame rate in the clip.
Here, below we define a method that can pre-process the clip and bounding boxes. It generates inputs accordingly for both Slow (Resnet) and SlowFast models depending on the parameterization of the variable `slow_fast_alpha`.
```
def ava_inference_transform(
    clip,
    boxes,
    num_frames = 4, #if using slowfast_r50_detection, change this to 32
    crop_size = 256,
    data_mean = [0.45, 0.45, 0.45],
    data_std = [0.225, 0.225, 0.225],
    slow_fast_alpha = None, #if using slowfast_r50_detection, change this to 4
):
    """Pre-process a video clip and person boxes for AVA action detection.

    clip: video tensor — assumed (channels, time, height, width) with
        uint8 values in [0, 255]; TODO confirm against the caller.
    boxes: person boxes in [x1, y1, x2, y2] pixel coordinates.
    Returns (clip, boxes, ori_boxes): the transformed clip (a list of
    two pathways when slow_fast_alpha is set), the rescaled boxes as a
    tensor, and a copy of the original boxes.
    """
    boxes = np.array(boxes)
    # Keep an untouched copy so the caller can map predictions back to
    # the original image coordinates.
    ori_boxes = boxes.copy()

    # Image [0, 255] -> [0, 1].
    # Sample num_frames evenly spaced frames from the clip first.
    clip = uniform_temporal_subsample(clip, num_frames)
    clip = clip.float()
    clip = clip / 255.0

    height, width = clip.shape[2], clip.shape[3]
    # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the
    # range of [0, width] for x and [0,height] for y
    clip_boxes_to_image(boxes, height, width)
    # Resize short side to crop_size. Non-local and STRG uses 256.
    # Boxes are rescaled together with the clip so they stay aligned.
    clip, boxes = short_side_scale_with_boxes(
        clip,
        size=crop_size,
        boxes=boxes,
    )

    # Normalize images by mean and std.
    clip = normalize(
        clip,
        np.array(data_mean, dtype=np.float32),
        np.array(data_std, dtype=np.float32),
    )

    # Re-clip boxes against the resized clip dimensions.
    boxes = clip_boxes_to_image(
        boxes, clip.shape[2],  clip.shape[3]
    )

    # Incase of slowfast, generate both pathways
    if slow_fast_alpha is not None:
        fast_pathway = clip
        # Perform temporal sampling from the fast pathway.
        # The slow pathway takes every slow_fast_alpha-th frame.
        slow_pathway = torch.index_select(
            clip,
            1,
            torch.linspace(
                0, clip.shape[1] - 1, clip.shape[1] // slow_fast_alpha
            ).long(),
        )
        clip = [slow_pathway, fast_pathway]

    return clip, torch.from_numpy(boxes), ori_boxes
```
## Setup
Download the id to label mapping for the AVA V2.2 dataset on which the Torch Hub models were finetuned.
This will be used to get the category label names from the predicted class ids.
Create a visualizer to visualize and plot the results(labels + bounding boxes).
```
!wget https://dl.fbaipublicfiles.com/pytorchvideo/data/class_names/ava_action_list.pbtxt
# Create an id to label name mapping
label_map, allowed_class_ids = AvaLabeledVideoFramePaths.read_label_map('ava_action_list.pbtxt')
# Create a video visualizer that can plot bounding boxes and visualize actions on bboxes.
video_visualizer = VideoVisualizer(81, label_map, top_k=3, mode="thres",thres=0.5)
```
## Load an example video
We get an opensourced video off the web from WikiMedia.
```
!wget https://dl.fbaipublicfiles.com/pytorchvideo/projects/theatre.webm
# Load the video
encoded_vid = pytorchvideo.data.encoded_video.EncodedVideo.from_path('theatre.webm')
print('Completed loading encoded video.')
```
## Generate bounding boxes and action predictions for a 10 second clip in the video.
```
# Video predictions are generated at an internal of 1 sec from 90 seconds to 100 seconds in the video.
time_stamp_range = range(90,100) # time stamps in video for which clip is sampled.
clip_duration = 1.0 # Duration of clip used for each inference step.
gif_imgs = []
for time_stamp in time_stamp_range:
print("Generating predictions for time stamp: {} sec".format(time_stamp))
# Generate clip around the designated time stamps
inp_imgs = encoded_vid.get_clip(
time_stamp - clip_duration/2.0, # start second
time_stamp + clip_duration/2.0 # end second
)
inp_imgs = inp_imgs['video']
# Generate people bbox predictions using Detectron2's off the self pre-trained predictor
# We use the the middle image in each clip to generate the bounding boxes.
inp_img = inp_imgs[:,inp_imgs.shape[1]//2,:,:]
inp_img = inp_img.permute(1,2,0)
# Predicted boxes are of the form List[(x_1, y_1, x_2, y_2)]
predicted_boxes = get_person_bboxes(inp_img, predictor)
if len(predicted_boxes) == 0:
print("Skipping clip no frames detected at time stamp: ", time_stamp)
continue
# Preprocess clip and bounding boxes for video action recognition.
inputs, inp_boxes, _ = ava_inference_transform(inp_imgs, predicted_boxes.numpy())
# Prepend data sample id for each bounding box.
# For more details refere to the RoIAlign in Detectron2
inp_boxes = torch.cat([torch.zeros(inp_boxes.shape[0],1), inp_boxes], dim=1)
# Generate actions predictions for the bounding boxes in the clip.
# The model here takes in the pre-processed video clip and the detected bounding boxes.
if isinstance(inputs, list):
inputs = [inp.unsqueeze(0).to(device) for inp in inputs]
else:
inputs = inputs.unsqueeze(0).to(device)
preds = video_model(inputs, inp_boxes.to(device))
preds= preds.to('cpu')
# The model is trained on AVA and AVA labels are 1 indexed so, prepend 0 to convert to 0 index.
preds = torch.cat([torch.zeros(preds.shape[0],1), preds], dim=1)
# Plot predictions on the video and save for later visualization.
inp_imgs = inp_imgs.permute(1,2,3,0)
inp_imgs = inp_imgs/255.0
out_img_pred = video_visualizer.draw_clip_range(inp_imgs, preds, predicted_boxes)
gif_imgs += out_img_pred
print("Finished generating predictions.")
```
## Save predictions as video
The generated video consists of bounding boxes with predicted actions for each bounding box.
```
height, width = gif_imgs[0].shape[0], gif_imgs[0].shape[1]
vide_save_path = 'output_detections.mp4'
video = cv2.VideoWriter(vide_save_path,cv2.VideoWriter_fourcc(*'DIVX'), 7, (width,height))
for image in gif_imgs:
img = (255*image).astype(np.uint8)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
video.write(img)
video.release()
print('Predictions are saved to the video file: ', vide_save_path)
```
| github_jupyter |
## What is a Variable?
A variable is any characteristic, number, or quantity that can be measured or counted. The following are examples of variables:
- Age (21, 35, 62, ...)
- Gender (male, female)
- Income (GBP 20000, GBP 35000, GBP 45000, ...)
- House price (GBP 350000, GBP 570000, ...)
- Country of birth (China, Russia, Costa Rica, ...)
- Eye colour (brown, green, blue, ...)
- Vehicle make (Ford, Volkswagen, ...)
They are called 'variables' because the value they take may vary (and it usually does) in a population.
Most variables in a data set can be classified into one of two major types:
- **Numerical variables**
- **Categorical variables**
===================================================================================
## Numerical variables
The values of a numerical variable are numbers. They can be further classified into discrete and continuous variables.
### Discrete numerical variable
A variable which values are whole numbers (counts) is called discrete. For example, the number of items bought by a customer in a supermarket is discrete. The customer can buy 1, 25, or 50 items, but not 3.7 items. It is always a round number. The following are examples of discrete variables:
- Number of active bank accounts of a borrower (1, 4, 7, ...)
- Number of pets in the family
- Number of children in the family
### Continuous numerical variable
A variable that may contain any value within some range is called continuous. For example, the total amount paid by a customer in a supermarket is continuous. The customer can pay, GBP 20.5, GBP 13.10, GBP 83.20 and so on.
Other examples of continuous variables are:
- House price (in principle, it can take any value) (GBP 350000, 57000, 1000000, ...)
- Time spent surfing a website (3.4 seconds, 5.10 seconds, ...)
- Total debt as percentage of total income in the last month (0.2, 0.001, 0, 0.75, ...)
=============================================================================
## Real Life example: Peer to peer lending (Finance)
### Lending Club
**Lending Club** is a peer-to-peer Lending company based in the US. They match people looking to invest money with people looking to borrow money. When investors invest their money through Lending Club, this money is passed onto borrowers, and when borrowers pay their loans back, the capital plus the interest passes on back to the investors. It is a win for everybody as they can get typically lower loan rates and higher investor returns.
If you want to learn more about Lending Club follow this [link](https://www.lendingclub.com/).
The Lending Club dataset contains complete loan data for all loans issued through 2007-2015, including the current loan status (Current, Late, Fully Paid, etc.) and latest payment information. Features include credit scores, number of finance inquiries, address including zip codes and state, and collections among others. Collections indicates whether the customer has missed one or more payments and the team is trying to recover their money.
The file is a matrix of about 890 thousand observations and 75 variables. More detail on this dataset can be found in [Kaggle's website](https://www.kaggle.com/wendykan/lending-club-loan-data)
Let's go ahead and have a look at the variables!
====================================================================================================
To download the Lending Club loan book from Kaggle go to [this website](https://www.kaggle.com/wendykan/lending-club-loan-data)
Scroll down to the bottom of the page, and click on the link 'loan.csv', and then click the 'download' blue button towards the right of the screen, to download the dataset.
Unzip it, and save it to a directory of your choice.
**Note that you need to be logged in to Kaggle in order to download the datasets**.
If you save it in the same directory from which you are running this notebook, then you can load it the same way I will load it below.
====================================================================================================
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Load only the columns we analyse and a 10k-row sample, to keep the
# demonstration fast.
use_cols = [
    'loan_amnt', 'int_rate', 'annual_inc', 'open_acc', 'loan_status',
    'open_il_12m'
]
data = pd.read_csv(
    'loan.csv', usecols=use_cols).sample(
        10000, random_state=44)  # set a seed for reproducibility
data.head()
```
### Continuous Variables
```
# loan_amnt: the amount of money requested by the borrower, in US dollars.
data.loan_amnt.unique()
# Plot a histogram to get familiar with the distribution of the variable.
fig = data.loan_amnt.hist(bins=50)
fig.set_title('Loan Amount Requested')
fig.set_xlabel('Loan Amount')
fig.set_ylabel('Number of Loans')
```
The values of the variable vary across the entire range of the variable. This is characteristic of continuous variables.
The taller bars correspond to loan sizes of 10000, 15000, 20000, and 35000. There are more loans disbursed for those loan amount values. This indicates that most people tend to ask for these loan amounts. Likely, these particular loan amounts are pre-determined and offered as such in the Lending Club website.
Less frequent loan values, like 23,000 or 33,000 could be requested by people who require a specific amount of money for a definite purpose.
```
# Same exercise for int_rate: the interest rate charged by Lending Club
# to the borrowers.
data.int_rate.unique()
# Plot a histogram to get familiar with the distribution of the variable.
fig = data.int_rate.hist(bins=30)
fig.set_title('Interest Rate')
fig.set_xlabel('Interest Rate')
fig.set_ylabel('Number of Loans')
```
Again, we see that the values of the variable vary continuously across the variable range.
```
# annual_inc: the yearly income declared by the customers.
fig = data.annual_inc.hist(bins=100)
# Truncate the x-axis: a handful of very high earners would otherwise
# flatten the visible distribution.
fig.set_xlim(0, 400000)
fig.set_title("Customer's Annual Income")
fig.set_xlabel('Annual Income')
fig.set_ylabel('Number of Customers')
```
The majority of salaries are concentrated towards values in the range 30-70 k, with only a few customers earning higher salaries. Again, the values of the variable vary continuously across the variable range.
### Discrete Variables
Let's explore the variable "Number of open credit lines in the borrower's credit file" (open_acc in the dataset). This is, the total number of credit items (for example, credit cards, car loans, mortgages, etc) that is known for that borrower. By definition it is a discrete variable, because a borrower can have 1 credit card, but not 3.5 credit cards.
```
# open_acc: number of open credit lines — a discrete variable.
data.open_acc.dropna().unique()
# Plot a histogram to get familiar with the distribution of the variable.
fig = data.open_acc.hist(bins=100)
fig.set_xlim(0, 30)
fig.set_title('Number of open accounts')
fig.set_xlabel('Number of open accounts')
fig.set_ylabel('Number of Customers')
```
Histograms of discrete variables have this typical broken shape, as not all the values within the variable range are present in the variable. As I said, the customer can have 3 credit cards, but not 3.5 credit cards.
Let's look at another example of a discrete variable in this dataset: **Number of installment accounts opened in past 12 months** (open_il_12m in the dataset). Installment accounts are those that at the moment of acquiring them, there is a set period and amount of repayments agreed between the lender and borrower. An example of this is a car loan, or a student loan. The borrower knows that they are going to pay a certain, fixed amount over for example 36 months.
```
# open_il_12m: installment accounts opened in the past 12 months.
data.open_il_12m.unique()
# Plot a histogram to get familiar with the distribution of the variable.
fig = data.open_il_12m.hist(bins=50)
fig.set_title('Number of installment accounts opened in past 12 months')
fig.set_xlabel('Number of installment accounts opened in past 12 months')
fig.set_ylabel('Number of Borrowers')
```
The majority of the borrowers have none or 1 installment account, with only a few borrowers having more than 2.
### A variation of discrete variables: the binary variable
Binary variables, are discrete variables, that can take only 2 values, therefore binary.
In the next cells I will create an additional variable, called defaulted, to capture the number of loans that have defaulted. A defaulted loan is a loan that a customer has failed to re-pay and the money is lost.
The variable takes the value 0 where the loans are ok and being re-paid regularly, or 1, when the borrower has confirmed that they will not be able to re-pay the borrowed amount.
```
# Inspect the raw loan_status categories.
data.loan_status.unique()
# Create a binary 'defaulted' flag: 1 when the loan status is 'Default'
# (borrower has confirmed they cannot repay), 0 otherwise.
data['defaulted'] = np.where(data.loan_status.isin(['Default']), 1, 0)
# Fraction of defaulted loans in the sample.
data.defaulted.mean()
# the new variable takes the value of 0
# if the loan is not defaulted
data.head()
# the new variable takes the value 1 for loans that
# are defaulted
data[data.loan_status.isin(['Default'])].head()
# A binary variable can take only 2 values:
# either the loan is defaulted (1) or not (0)
data.defaulted.unique()
# Plot a histogram, although histograms for binary variables
# are of limited use
fig = data.defaulted.hist()
fig.set_xlim(0, 2)
fig.set_title('Defaulted accounts')
fig.set_xlabel('Defaulted')
fig.set_ylabel('Number of Loans')
```
As we can see, the variable shows only 2 values, 0 and 1, and the majority of the loans are ok.
**That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
| github_jupyter |
# Scrapy: part 1
**Scrapy** is a powerful web scraping framework for Python. A framework is still a library ("an API of functions") yet with more powerful built-in features. It can be described as the combination of all we learnt till now including requests, BeautifulSoup, lxml and RegEx. To install **Scrapy**, open the command prompt and run the following command:
```
pip install scrapy
```
Once scrapy is installed one can start experiencing it by just running the following command inside the command prompt (e.g. let's assume you want to scrape the http://quotes.toscrape.com/page/1/ page):
```
scrapy shell http://quotes.toscrape.com/page/1/
```
Now, you must be able to apply powerful scrapy functions to get the data you want. However, all of this are available inside the command prompt. If you want to experience the same inside a Jupyter notebook, you must try to *mimic* the command prompt behaviour by adding **5** additional lines as shown below (instead of running the abovementioned command). As this material is provided in a Jupyter notebook, we will also *mimic* the command prompt behavior, yet you are encouraged to experience it yourself.
```
# Mimic `scrapy shell <url>` inside the notebook: fetch the page with
# requests and wrap it in a scrapy TextResponse so css()/xpath() work.
import requests
from scrapy.http import TextResponse

url = "http://quotes.toscrape.com/page/1/"
r = requests.get(url)
response = TextResponse(r.url, body=r.text, encoding='utf-8')
```
Fine, now we are ready to apply the **scrapy** functions on our **response** object. All the code following this line is same for both Jupyter notebook users and those you chose to experience the command prompt approach.
As we covered before, there are two main ways to navigate over an HTML file: using CSS selectors and the XPath approach. While **BeautifulSoup** supported only the former, **Scrapy** has functions for both: **css()** for using css selectors and **xpath()** for the xpath approach.
### CSS selectors
Let's use CSS selectors to find the title of the page.
```
response.css('title')
```
As you can see it provides *more information than needed*. That's why there is an **extract()** function, that will extract only the component we are interested in without the additional information. It can be said that **css()** and **extract()** function mimic the **findAll()** behaviour from BeautifulSoup.
```
response.css('title').extract()
```
Excellent! We now have the correct tag we were looking for with the text inside. If we want to choose only the text content there is no need for using additional function: one just needs to add the following component to the CSS selector **::text** as shown below.
```
response.css('title::text').extract()
type(response.css('title::text').extract())
```
As mentioned before, the **extract()** function applied on the css selector mimics the **findAll()** behavior. This is true also about the output we receive: it has the type of list. If one needs to receive only one element as an output, the **extract_first()** function must be used, which will return the very first matched element (similarly to **find()** from BeautifulSoup).
```
response.css('title::text').extract_first()
type(response.css('title::text').extract_first())
```
Let's now try to find the heading of the page (which is Quotes to Scrape). Heading is provided inside a `<h1>` tag as usually.
```
response.css('h1').extract()
```
Again, we can get the heading text by using the **::text** guy.
```
response.css('h1::text').extract()
```
The latter did not really help because the heading text was inside an `<a>` tag, which in its turn was inside the above found `<h1>` tag.
```
response.css('h1 a').extract()
```
Nice! We found it. As you can see it has the style attribute that differentiates this `<a>` tag from others (kind of an identifier). We could use it to find this `<a>` tag even without mentioning that it is inside a `<h1>` guy. To do this in **Scrapy**, square brackets should be used.
```
response.css('a[style="text-decoration: none"]').extract()
```
Great! Let's now extract the text first and then go for the link inside this tag (i.e. the value of the **href** attribute).
```
response.css('a[style="text-decoration: none"]::text').extract()
```
To get the value of the **href** attribute (and the same for any other attribute) the following approach can be used in **Scrapy**, which can be considered the alternative to the **get()** function in BeautifulSoup or lxml.
```
response.css('a[style="text-decoration: none"]::attr(href)').extract()
```
**Scrapy** also supports regular expressions that can directly be applied on matched response. For example, let's select only the "to Scrape" part from the heading using regular expressions. We just need to substitute the **extract()** function with a **re()** function that will take the expression as an argument.
```
# Regex on the matched text: match "Quotes", a whitespace, then anything
# else; return only the captured "anything else" group.
response.css('h1 a::text').re('Quotes\s(.*)')
```
Similarly, we could use RegEx to match and return each word of the heading separately as a list element:
```
response.css('h1 a::text').re('(\S+)\s(\S+)\s(\S+)')
```
Perfect, we are done now with **css()** function, let's now implement the same in **xpath()**.
### XPath approach
```
response.xpath('//title').extract()
```
To get the text only, the following should be added to the Xpath argument: **/text()**
```
response.xpath('//title/text()').extract()
```
Similarly, we can find the `<a>` tag inside the `<h1>` and extract first text then the link.
```
response.xpath('//h1/a').extract()
response.xpath('//h1/a/text()').extract()
```
**xpath()** function operates in the same way as in the **lxml** package, which means **/@href** should be added to the path to select the value of the **href** attribute (i.e. the link).
```
response.xpath('//h1/a/@href').extract()
```
This is all for Part 1. We just used Scrapy as a library and experienced part of its power: **Scrapy** is kind of the combination of whatever we learnt till now. Yet, this is not the only reason **Scrapy** is powerful and demanded. The rest will be covered in following parts.
P.S. If you were using the command prompt to run this code, then run the **exit()** command to exit Scrapy. If you want to save your commands before exiting into a Python file, then the following command will be of use:
```
%save my_commands 1-56
```
where my_commands is the name of the file to be created (change it based on your taste) and 1-56 tells Python to save the code starting from line 1 (the very beginning) and ending with line 56 (put the line that you want here, the last one if you want to save the whole code).
| github_jupyter |
```
# Imports
import csv
import pandas as pd
import itertools
import math
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import spacy
import string
import re
import nltk
import random
import praw
from google.colab import files
import seaborn as sns
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, f1_score, precision_score
from sklearn.metrics import classification_report, plot_confusion_matrix
# Build the 2020PB police-brutality dataframe from the project's JSON feed.
# The feed nests the incident records under a top-level 'data' key.
df_2020pb = pd.read_json('https://raw.githubusercontent.com/2020PB/police-brutality/data_build/all-locations-v2.json')
df_2020pb = pd.json_normalize(df_2020pb['data'])
# 'edit_at' is bookkeeping metadata not used downstream.
df_2020pb = df_2020pb.drop(columns=['edit_at'])
# Clean the urls
def cleanlinks(link_records):
    """Extract the 'url' field from each link record.

    Parameters
    ----------
    link_records : iterable of dict
        Each element is expected to have a 'url' key.

    Returns
    -------
    list of str
        The url values, in the original order.
    """
    # Comprehension replaces the manual append loop; the parameter is
    # renamed from `json`, which shadowed the json module name.
    return [record['url'] for record in link_records]
# Flatten each row's link records to a plain list of urls and parse dates.
df_2020pb['links'] = df_2020pb['links'].apply(cleanlinks)
df_2020pb['date'] = pd.to_datetime(df_2020pb['date'], format='%Y-%m-%d')
# Collect the set of unique tags across all incidents.
# NOTE: the membership check is redundant for a set, but harmless.
all_tags = df_2020pb['tags'].copy()
tags = set()
for taglist in all_tags:
    for tag in taglist:
        if tag not in tags:
            tags.add(tag)
# Align the column name with the Reddit dataframe built below.
df_2020pb.rename(columns={'name':'title'}, inplace=True)
df_2020pb.head()
# Create new news.csv from the hottest posts on r/news.
!pip install praw
#### REDDIT CREDENTIALS HERE! (CLIENT_ID etc. must be defined before this cell runs)
reddit = praw.Reddit(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, password=PASSWORD, user_agent=USER_AGENT, username=USERNAME)
# Grabbing 1000 hottest posts on Reddit
data = []
# Grab the data from the "news" subreddit
for submission in reddit.subreddit("news").hot(limit=1000):
    data.append([submission.id, submission.title, submission.score, submission.subreddit, submission.url,
                 submission.num_comments, submission.selftext, submission.created])
# Create and assign column names
col_names = ['id', 'title', 'score', 'subreddit', 'url',
             'num_comments', 'text', 'created']
df_reddit = pd.DataFrame(data, columns=col_names)
df_reddit.head()
df_reddit.head()
# Combine the 2020PB and Reddit dataframes into a single source.
df_combined = pd.concat([df_2020pb, df_reddit])
df_combined.head()
# Keep only the id and title columns for prediction.
df_titles = df_combined[['id', 'title']]
df_titles.head()
# Persist the combined titles.
df_titles.to_csv('all_sources.csv', index=False)
# Round-trip check: read the csv back and inspect the head.
news = pd.read_csv('all_sources.csv')
news.head()
# Import not_brutality training csv (headlines without police-brutality content)
train_not_brutality = pd.read_csv('https://raw.githubusercontent.com/Lambda-School-Labs/Labs27-C-HRF-DS/main/notebooks/news.csv')
# Import is_brutality training csv
train_brutality = pd.read_csv('https://raw.githubusercontent.com/Lambda-School-Labs/Labs27-C-HRF-DS/main/notebooks/police_use_force.csv')

# Label the negative class. BUG FIX: the original used '==' (a throwaway
# comparison) instead of '=', so the 'police_brutality' label column was
# never actually assigned.
train_not_brutality['police_brutality'] = 0
# Drop Reddit-specific columns not present in the positive set.
train_not_brutality = train_not_brutality.drop(['id', 'score', 'subreddit', 'url', 'num_comments', 'text', 'created'], axis=1)
train_not_brutality.head()

# Label the positive class (same '==' -> '=' fix) and align the title
# column name with the negative set.
train_brutality['police_brutality'] = 1
train_brutality.rename(columns={'name': 'title'}, inplace=True)
train_brutality.head()

# Combine training data
df = pd.concat([train_brutality, train_not_brutality])
df.head()
# Create tokenizer functions.
# NOTE(review): `spacy.load("en")` is the legacy shorthand; newer spaCy
# releases require an explicit model name such as "en_core_web_sm" —
# confirm against the pinned spaCy version.
nlp = spacy.load("en")
def tokenize(text):
    """Lemmatize *text* with the module-level spaCy pipeline.

    Stop words, punctuation, and bare-space tokens are dropped; the
    remaining tokens are returned as a list of lemmas.
    """
    doc = nlp(text)
    lemmas = []
    for token in doc:
        # Skip tokens we do not want in the vocabulary.
        if token.is_stop or token.is_punct or token.text == " ":
            continue
        lemmas.append(token.lemma_)
    return lemmas
def retoken(text):
    """Strip unwanted characters, lowercase, and return the token list
    rendered as its string representation.

    Note: the result is the *repr* of a list (e.g. "['a', 'b']"), which
    is what the downstream vectorizer is fitted on.
    """
    # NOTE: inside the character class only the leading '^' negates; the
    # second '^' is a literal caret, so carets survive the substitution.
    cleaned = re.sub(r'[^a-zA-Z ^0-9]', '', text)
    words = cleaned.lower().split()
    return str(words)
# Build the 'tokens' column for vectorization. The original code first
# applied `tokenize` and then immediately overwrote the column with the
# `retoken` result, so the expensive spaCy pass was dead work; only the
# `retoken` application is kept (the final column value is unchanged).
df['tokens'] = df['title'].apply(retoken)
df.head()
# Fit a bag-of-words vectorizer on the tokens column and transform it.
vect = CountVectorizer()
vect.fit(df['tokens'])
dtm = vect.transform(df['tokens'])
# Create a dataframe from the document-term matrix features.
dtm_df = pd.DataFrame(dtm.todense(), columns = vect.get_feature_names())
dtm_df.head()
# Separate dataframe into train and test datasets (80/20 split).
X = df['title']
y = df['police_brutality']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# Model pipeline: TF-IDF features feeding a random forest classifier.
pipeline = Pipeline([
    ('tfidf', TfidfVectorizer()),
    ('classifier', RandomForestClassifier(random_state=42, n_jobs=-1, max_depth=5, n_estimators=45,)),
])
pipeline.fit(X_train,y_train)
predictions = pipeline.predict(X_test)
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from scipy.stats import randint, uniform
# Randomized hyperparameter search over the forest's tree depth.
param_distributions = {
    'classifier__max_depth': [1, 2, 3, 4, 5]}
search = RandomizedSearchCV(
    pipeline,
    param_distributions=param_distributions,
    n_iter=10,
    cv=3,
    scoring='accuracy',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)
search.fit(X_train, y_train);
# Check best parameters for further tuning
print('Best hyperparameters', search.best_params_)
print('Best Score', search.best_score_)
# Check classification report.
# NOTE(review): `predictions` comes from the pipeline fitted BEFORE the
# search, not from search.best_estimator_ — confirm that is intended.
print(classification_report(y_test,predictions))
# Plot confusion matrix (also uses the pre-search pipeline).
cm = plot_confusion_matrix(pipeline, X_test, y_test, values_format='.0f', xticks_rotation='vertical')
cm;
# ROC AUC on the held-out set, using the last-column class probabilities.
from sklearn.metrics import roc_auc_score
y_pred_proba = pipeline.predict_proba(X_test)[:, -1]
print('Test ROC AUC:', roc_auc_score(y_test, y_pred_proba))
# Persist the fitted pipeline.
from pickle import dump
dump(pipeline, open('hrfc_rfmodel_v1.pkl', 'wb'))
# Load the model back (sanity check that the artifact deserializes).
from pickle import load
loaded_model = load(open('hrfc_rfmodel_v1.pkl', 'rb'))
# Create a function to get predictions using the model
def get_predictions(post, num_answers=2):
    """Return the *num_answers* most likely classes for *post*.

    Uses the module-level fitted ``pipeline``. Keys of the returned dict
    are class labels, values their predicted probabilities, sorted from
    most to least likely.
    """
    # Probabilities for the single input, labelled by class.
    probabilities = pipeline.predict_proba(post)[0]
    ranked = (
        pd.Series(probabilities, index=pipeline.classes_)
        .sort_values(ascending=False)
    )
    # Top-k classes as a plain dict.
    return ranked.head(num_answers).to_dict()
# Titles to score with the trained model.
titles_list = news['title'].tolist()
titles_list
# Score each title; each entry is a {class: probability} dict.
predictions = []
for title in titles_list:
    predictions.append(get_predictions([title]))
# Attach predictions to the news dataframe and inspect the results.
news['prediction'] = predictions
news.head(10)
```
| github_jupyter |
# Supplementary Material: Analyse AdaptiveAttention
AdaptiveAttention is from the paper by Lu et al. (2017):
```
@inproceedings{lu2017knowing,
title={Knowing when to look: Adaptive attention via a visual sentinel for image captioning},
author={Lu, Jiasen and Xiong, Caiming and Parikh, Devi and Socher, Richard},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
volume={6},
pages={2},
year={2017}
}
```
The following code depends on pre-generated image captions using the implementation of the model from here:
https://github.com/jiasenlu/AdaptiveAttention
`seqs_test.npy` : the generated sequence.
`attens_test.npy` : the corresponding attentions.
`cocotalk_vocab.json` : the vocabulary provided with the model.
The generated sequences are for images in the validation images in MS-COCO:
http://images.cocodataset.org/zips/val2014.zip
```
@inproceedings{lin2014microsoft,
title={Microsoft coco: Common objects in context},
author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
booktitle={European conference on computer vision},
pages={740--755},
year={2014},
organization={Springer}
}
```
```
import numpy as np
from scipy.stats import spearmanr
import json
from nltk import pos_tag, tokenize
import re
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.collections import PatchCollection
from PIL import Image, ImageDraw
# Generated caption index sequences and their attention tensors; the first
# row is dropped — presumably a dummy/offset entry, confirm against the
# generation script.
seqs = np.load('seqs_test.npy')[1:]
attens = np.load('attens_test.npy')[1:]
# The vocabulary json is 1-indexed, so ix_to_word[i] holds index i+1.
ix_to_word = json.load(open('cocotalk_vocab.json'))['ix_to_word']
ix_to_word = [ix_to_word[str(i+1)] for i in range(len(ix_to_word))]
# Initial shortlist of prepositions; superseded by the full Landau list below.
en_preps = ['in', 'on', 'at', 'to', 'above', 'below', 'over', 'under']
# Landau English prepositions. Entries are either a simple token or a
# tuple (canonical token, surface variation(s)).
# Fixes: 'neadby' typo corrected to 'nearby' (the typo could never match a
# real word), and a duplicate 'on' removed from the simple-relations row.
en_preps = [
    # simple spatial relations
    'at', 'on', 'in', 'off',
    'out', 'by', 'from', 'to',
    'up', 'down', 'over', 'under',
    'with', ('within', 'with in'), ('without', 'with out'), 'near',
    'nearby', ('into', 'in to'), ('onto', 'on to'), 'toward',
    'through', 'throughout', 'underneath', 'along',
    'across', ('among', 'amongst'), 'against', 'around',
    'about', 'above', ('amid', 'amidst'), 'before',
    'behind', 'below', 'beneath', 'between',
    'beside', 'outside', 'inside', ('alongside', 'along side'),
    'via', 'after', 'upon',
    # compounds
    ('top', 'on top of'), ('between', 'in between'), ('right', 'to the right of'), ('parallel', 'parallel to'),
    ('back', 'in back of'), ('left', 'to the left of'), ('side', 'to the side'), ('perpendicular', 'perpendicular to'),
    ('front', 'in front of'),
    # temporal only
    'during', 'since', 'until', 'ago',
    # intransitives (+ additional variations)
    'here', 'outward', ('backward', 'backwards'), ('south', 'south of'),
    'there', ('afterward', 'afterwards'), 'away', ('east', 'east of'),
    'upward', 'upstairs', 'apart', ('west', 'west of'),
    'downward', 'downstairs', 'together', 'left',
    'inward', 'sideways', ('north', 'north of'), 'right',
]
# Herskovits projective terms: generate (canonical, composite) pairs such
# as ('left', 'at the left of').
en_preps += [(w2, w1+' the '+w2+' of') for w1 in ['at', 'on', 'to', 'by'] for w2 in ['left', 'right']]
en_preps += [(w2, w1+' the '+w2+' side of') for w1 in ['at', 'on', 'in', 'to', 'by'] for w2 in ['left', 'right']]
en_preps += [(w2, w1+' the '+w2+' hand side of') for w1 in ['at', 'on', 'in', 'to', 'by'] for w2 in ['left', 'right']]
en_preps += [(w2, w1+' the '+w2+' of') for w1 in ['at', 'on', 'in', 'to', 'by'] for w2 in ['front', 'back', 'side']]
en_preps += [(w1, 'in '+w1+' of') for w1 in ['front', 'back']]
en_preps += [(w1,) for w1 in ['before', 'behind']]
en_preps += [(w1, w1+' of') for w1 in ['left', 'right', 'back']]
en_preps += [(w1,) for w1 in ['above', 'below']]
en_preps += [(w1,) for w1 in ['over', 'under']]
en_preps += [(w2, w1+' the '+w2+' of') for w1 in ['at', 'on', 'in', 'by'] for w2 in ['top', 'bottom']]
en_preps += [(w2, w1+' '+w2+' of') for w1 in ['on'] for w2 in ['top']]
# items missing from the lists above
en_preps += [('next', 'next to'), ('front', 'on the front of', 'on front of', 'front of')]
# variants without 'the' ("at left of", ...)
en_preps += [(w2, w1+' '+w2+' of') for w1 in ['at', 'on', 'to', 'by'] for w2 in ['left', 'right']]
en_preps += [(w2, w1+' '+w2+' side of') for w1 in ['at', 'on', 'in', 'to', 'by'] for w2 in ['left', 'right']]
en_preps += [(w2, w1+' '+w2+' hand side of') for w1 in ['at', 'on', 'in', 'to', 'by'] for w2 in ['left', 'right']]
en_preps += [(w2, w1+' '+w2+' of') for w1 in ['at', 'on', 'in', 'to', 'by'] for w2 in ['front', 'back', 'side']]
en_preps += [(w2, w1+' '+w2+' of') for w1 in ['at', 'on', 'in', 'by'] for w2 in ['top', 'bottom']]
# normalize: every entry becomes a tuple whose first element is the
# canonical simple token
en_preps = [(w,) if type(w) != tuple else w for w in en_preps]
# Dictionary mapping every surface variation to its canonical simple token.
composit2simple = dict()
composit2simple.update({w_alt: w[0] for w in en_preps for w_alt in w})
# every canonical token also maps to itself
composit2simple.update({w: w for w in composit2simple.values()})
# add common annotation variants prefixed/suffixed with 'a', 'is', 'are'
composit2simple.update({new_w_alt: w for w_alt, w in composit2simple.items() for new_w_alt in [w_alt + ' a', 'are ' + w_alt + ' a', 'is ' + w_alt + ' a', 'are ' + w_alt, 'is ' + w_alt]})
# basic keywords, just for presentation
spatial_key_words = [
    'left', 'right',
    'in', 'on',
    'over', 'above',
    'below', 'under',
    'front', 'back',
    'next', 'near',
    'far', 'away',
    'between',
]
# all keywords in Herskovits and all keywords in Landau
spatial_key_words = list(set(composit2simple.values()))
```
### Extract the attentions by part of speech
```
# Decode an index sequence into words, skipping padding (0) and the
# end-of-sentence index (the entry past the vocabulary, ix-1 == len(ix_to_word)).
seq_to_sent = lambda seq, ix_to_word: [ix_to_word[ix-1] for ix in seq if ix != 0 and ix-1 != len(ix_to_word)]

# Per vocabulary index, collect 1 - column-0 attention at every position
# where that word was generated (column 0 appears to be the adaptive/
# sentinel channel — confirm against the model's output layout).
ix_scores = {i+1: [] for i in range(len(ix_to_word))}
for ix, score in zip(seqs.flatten(), attens[:,:, :1].flatten()):
    if ix in ix_scores:
        ix_scores[ix].append(1-score)
# Per-word [mean, std, count] of that weight.
vocabs_scores = {
    ix_to_word[ix-1]: [np.mean(ix_scores[ix]), np.std(ix_scores[ix]), len(ix_scores[ix])] if len(ix_scores[ix]) > 0 else [0, 0, 0]
    for ix in ix_scores
}

pos_scores = {}
# Spatial-grid attention (columns 1: of the tensor, reshaped to 7x7 in the
# plots below) around each keyword: rows are (mean over nouns left of the
# keyword, the keyword itself, mean over nouns right of the keyword).
spatial_key_words_lr_attens = {
    w: np.zeros((0, 3, attens.shape[2]-1))
    for w in spatial_key_words
}
# Same layout but for the column-0 (adaptive) channel.
spatial_key_words_adaptive_attens = {
    w: np.zeros((0, 3, 1))
    for w in spatial_key_words
}
for seq, atten in zip(seqs, attens):
    sent = seq_to_sent(seq, ix_to_word)
    # Universal POS tags for the decoded sentence.
    poses = list(zip(*pos_tag(sent, tagset='universal')))[1]
    # Accumulate the per-POS weight.
    for pos, score in zip(poses, atten[:len(sent), 0]):
        if pos in pos_scores:
            pos_scores[pos].append(1-score)
        else:
            pos_scores[pos]= [1-score]
    # Spatial attention averaging per keyword.
    # NOTE(review): sent.index(w) only finds the first occurrence of w.
    for w in spatial_key_words_lr_attens:
        if w in sent:
            left_nouns = [i for i, pos in enumerate(poses) if i < sent.index(w) and pos == 'NOUN']
            right_nouns = [i for i, pos in enumerate(poses) if i > sent.index(w) and pos == 'NOUN']
            # Need at least one noun on each side to form (target, relation, landmark).
            if len(left_nouns) == 0 or len(right_nouns) == 0:
                continue
            spatial_key_words_lr_attens[w] = np.concatenate([
                spatial_key_words_lr_attens[w],
                np.expand_dims(np.array([np.mean(atten[left_nouns, 1:], 0), atten[sent.index(w), 1:], np.mean(atten[right_nouns, 1:], 0)]), 0)
            ])
            spatial_key_words_adaptive_attens[w] = np.concatenate([
                spatial_key_words_adaptive_attens[w],
                np.expand_dims(np.array([np.mean(atten[left_nouns, 0:1], 0), atten[sent.index(w), 0:1], np.mean(atten[right_nouns, 0:1], 0)]), 0)
            ])
# Mean spatial maps per keyword (only keywords with at least one sample).
spatial_key_words_lr_attens_mean = {
    w: np.mean(spatial_key_words_lr_attens[w], 0)
    for w in spatial_key_words_lr_attens
    if spatial_key_words_lr_attens[w].shape[0] > 0
}
# Mean (1 - adaptive) weight per keyword as a length-3 vector.
spatial_key_words_adaptive_attens_mean = {
    w: 1-np.mean(spatial_key_words_adaptive_attens[w], 0).reshape((3,))
    for w in spatial_key_words_adaptive_attens
    if spatial_key_words_adaptive_attens[w].shape[0] > 0
}
# Reduce the per-POS score lists to [mean, std, count].
pos_scores = {
    pos: [np.mean(pos_scores[pos]), np.std(pos_scores[pos]), len(pos_scores[pos])]
    for pos in pos_scores
}
spatial_key_words_adaptive_attens_mean
```
### Visualizations and Reports
```
%matplotlib inline
# Colour anchors (RGBA) for the attention overlays.
red = (1.,0.,0.,.7)
red0 = (1.,0.,0.,.0)
blue = (0.,0.,1.,.7)
blue0 = (0.,0.,1.,.0)
white = (1.,1.,1.,1.)
black = (0.,0.,0.,1.)
grey = (.5,.5,.5,.5)
# Colormaps: the red/blue maps fade in from fully transparent so they can
# be overlaid on the black background image.
cmap_black = LinearSegmentedColormap.from_list('cmap_black',[white,black],256)
cmap_blue = LinearSegmentedColormap.from_list('cmap_blue',[blue0, blue,],256)
cmap_red = LinearSegmentedColormap.from_list('cmap_red',[red0, red,],256)
cmap_grey = LinearSegmentedColormap.from_list('cmap_grey',[white,black, ],256)
# One figure per spatial keyword with enough samples: top row shows the
# mean 7x7 attention for target / relation / landmark (values are raised
# to the 5th power purely to sharpen the visual contrast); bottom row
# plots the mean adaptive-attention weight at the three positions.
for index, w in enumerate(spatial_key_words):
    # Skip keywords with fewer than 5 collected samples.
    if spatial_key_words_lr_attens[w].shape[0] < 5:
        continue
    plt.figure(figsize=(3, 2))
    # the attentions
    # target (nouns left of the keyword), drawn in red
    ax = plt.subplot(2, 3, 1, )
    plt.ylabel('n={count}'.format(count=spatial_key_words_lr_attens[w].shape[0] if w in spatial_key_words_lr_attens_mean else 0))
    plt.imshow(np.zeros((7,7)), cmap=cmap_black)
    plt.imshow(spatial_key_words_lr_attens_mean[w][0].reshape((7,7))**5, cmap=cmap_red)
    plt.xticks([])
    plt.yticks([])
    # relation (the keyword itself), drawn in grey
    ax = plt.subplot(2, 3, 2, )
    plt.imshow(np.zeros((7,7)), cmap=cmap_black)
    plt.imshow(spatial_key_words_lr_attens_mean[w][1].reshape((7,7))**5, cmap=cmap_grey)
    plt.xticks([])
    plt.yticks([])
    # landmark (nouns right of the keyword), drawn in blue
    ax = plt.subplot(2, 3, 3, )
    ax.yaxis.set_label_text('generate')
    ax.yaxis.set_label_position("right")
    plt.imshow(np.zeros((7,7)), cmap=cmap_black)
    plt.imshow(spatial_key_words_lr_attens_mean[w][2].reshape((7,7))**5, cmap=cmap_blue)
    plt.xticks([])
    plt.yticks([])
    # adaptive attention across <target>, keyword, <landmark>
    ax = plt.subplot(2, 1, 2, )
    #plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
    #plt.title('{word}'.format(word=w))
    plt.ylabel('adaptive')
    ax.yaxis.set_label_position("right")
    plt.plot([1, 2, 3], spatial_key_words_adaptive_attens_mean[w])
    plt.xticks([0.5,1,2,3,3.5], ['', '<target>', w, '<landmark>', ''])
    plt.yticks([0.5, 0.75, 1.], ['0.5', '0.7', '1.0'])
    plt.grid(True)
    plt.savefig('reports/{0}-adaptive.pdf'.format(w))
    plt.show()
print("{word:8} {union:5} {target:5} {landmark:5} {trglnd:5}".format(
word="word",
union="uni",
target="trg",
landmark="lnd",
trglnd="trglnd",
))
# For each spatial keyword with enough samples, measure how strongly the
# relation-token attention tracks the target/landmark attentions (Spearman
# rank correlation) and print one LaTeX-ish table row per word.
#
# Layout assumption (TODO confirm): spatial_key_words_lr_attens_mean[w] is
# indexed [0]=target, [1]=relation, [2]=landmark, judging from the column
# names used below.
for index, w in enumerate(spatial_key_words):
    # Skip words with fewer than 5 attention samples.
    if spatial_key_words_lr_attens[w].shape[0] < 5:
        continue
    # Correlation of relation attention with target+landmark combined.
    union=spearmanr(spatial_key_words_lr_attens_mean[w][1], spatial_key_words_lr_attens_mean[w][0]+spatial_key_words_lr_attens_mean[w][2]).correlation
    # Correlation of relation attention with target / landmark alone.
    target=spearmanr(spatial_key_words_lr_attens_mean[w][1], spatial_key_words_lr_attens_mean[w][0]).correlation
    landmark=spearmanr(spatial_key_words_lr_attens_mean[w][1], spatial_key_words_lr_attens_mean[w][2]).correlation
    # Correlation between the target and landmark attentions themselves.
    trglnd=spearmanr(spatial_key_words_lr_attens_mean[w][0], spatial_key_words_lr_attens_mean[w][2]).correlation
    # Inclusion-exclusion-style combination (a + b - a*b). This value is
    # unused: `measure` is overwritten in the print() call below.
    measure=spearmanr(spatial_key_words_lr_attens_mean[w][1], spatial_key_words_lr_attens_mean[w][0]+spatial_key_words_lr_attens_mean[w][2] - spatial_key_words_lr_attens_mean[w][0]*spatial_key_words_lr_attens_mean[w][2]).correlation
    # Baseline: average correlation of this word's relation attention with
    # other words' target+landmark attention.
    others=0
    count=0
    for w0 in spatial_key_words:
        # NOTE(review): this condition looks suspicious. As written, the
        # current word (w0 == w) is never skipped, so it is included in its
        # own "others" baseline, and small-sample words are skipped only
        # when w0 != w. The intent was probably
        # `... < 5 or w == w0` — confirm before changing the numbers.
        if spatial_key_words_lr_attens[w0].shape[0] < 5 and w != w0:
            continue
        others+=spearmanr(spatial_key_words_lr_attens_mean[w][1], spatial_key_words_lr_attens_mean[w0][0]+spatial_key_words_lr_attens_mean[w0][2]).correlation
        count+=1
    others = others/count
    # Spread (max - min) of each attention channel. Named std_* but these
    # are ranges, not standard deviations.
    std_trg=(np.max(spatial_key_words_lr_attens_mean[w][0])-np.min(spatial_key_words_lr_attens_mean[w][0]))
    std_rel=(np.max(spatial_key_words_lr_attens_mean[w][1])-np.min(spatial_key_words_lr_attens_mean[w][1]))
    std_lnd=(np.max(spatial_key_words_lr_attens_mean[w][2])-np.min(spatial_key_words_lr_attens_mean[w][2]))
    print("{word:8} {union:5.2f} & {target:5.2f} & {landmark:5.2f} & {others:5.2f} {trglnd:5.2f} {random:5.2f} {measure} {std_trg:5.4f} & {std_rel:5.4f} & {std_lnd:5.4f}".format(
        word=w,
        union=union,
        target=target,
        landmark=landmark,
        others=others,
        trglnd=trglnd,
        # Random baseline against 49 points — assumes each per-word series
        # has length 49 (TODO confirm). Not seeded, so this column varies
        # from run to run.
        random=spearmanr(np.random.rand(49), spatial_key_words_lr_attens_mean[w][0]+spatial_key_words_lr_attens_mean[w][2]).correlation,
        #measure=union/others \
        #spearmanr(spatial_key_words_lr_attens_mean[w][1], spatial_key_words_lr_attens_mean[w][0]+spatial_key_words_lr_attens_mean[w][2] - spatial_key_words_lr_attens_mean[w][0]*spatial_key_words_lr_attens_mean[w][2]).correlation \
        #- spearmanr(spatial_key_words_lr_attens_mean[w][0], spatial_key_words_lr_attens_mean[w][2]).correlation \
        #+ (1 - ) \
        # measure is reported as "mean ± std" of (1 - adaptive attention)
        # for each of the three channels.
        measure="${0:0.2f} \pm {1:0.1f}$, ${2:0.2f} \pm {3:0.1f}$, ${4:0.2f} \pm {5:0.1f}$".format(
            1-np.mean(spatial_key_words_adaptive_attens[w],0).flatten()[0],
            np.std(spatial_key_words_adaptive_attens[w],0).flatten()[0],
            1-np.mean(spatial_key_words_adaptive_attens[w],0).flatten()[1],
            np.std(spatial_key_words_adaptive_attens[w],0).flatten()[1],
            1-np.mean(spatial_key_words_adaptive_attens[w],0).flatten()[2],
            np.std(spatial_key_words_adaptive_attens[w],0).flatten()[2],
        )
        ,
        std_trg=std_trg,
        std_rel=std_rel,
        std_lnd=std_lnd,
    ))
pos_scores
# Hand-picked spatial prepositions/adverbs of interest.
# NOTE(review): this literal is immediately overwritten by the
# comprehension below, so it is effectively dead code kept for reference.
spatial_key_words = [
    'left', 'right',
    'in', 'on',
    'over', 'above',
    'below', 'under',
    'front', 'back',
    'next', 'near',
    'far', 'away',
    'between',
]
# Replace with every simplified spatial word (values of composit2simple)
# that also has an entry in vocabs_scores.
spatial_key_words = [
    w for w in set(composit2simple.values())
    if w in vocabs_scores
]
# Word lists used to group vocabulary scores by rough syntactic/semantic
# category for the summary tables printed below.
some_objects = [
    'cat', 'dog',
    'man', 'woman',
    'cup', 'plate',
    'table', 'chair',
    'pen', 'book',
    'laptop', 'pizza',
    'tree', 'road',
    'building', 'mountain'
]
some_properties = [
    'red', 'green', 'blue', 'black', 'white', 'grey', 'purple',
    'big', 'small',
    'tall', 'short',
]
# sic: "numers" (numerals) — kept to match the key used in `categories`.
some_numers = [
    'one', 'two', 'three', 'four', 'five', 'six'
]
some_verbs = [
    'jumping', 'running', 'eating',
    'jump', 'run', 'eat',
]
some_verbs_aux = [
    'is', 'are', 'does',
    'can', 'could', 'will', 'would'
]
# Determiners and demonstratives.
det_indices = [
    'a', 'the',
    'this', 'that', 'there',
]
other_words = [
    'from', 'to', 'for',
]
some_pronouns = [
    'she', 'he', 'it', 'they',
    'her', 'his', 'him', 'its', 'them',
]
# Category name -> word list. Holds references (not copies), so in-place
# sorts on these values also reorder the lists above.
categories = {
    'spatial': spatial_key_words,
    'objects': some_objects,
    'properties': some_properties,
    'numers': some_numers,
    'verbs': some_verbs,
    'verbs_aux': some_verbs_aux,
    'others': other_words,
    'pronouns': some_pronouns,
    'det_indices': det_indices
}
# LaTeX-ish summary table of POS scores: header line, dashed section
# banner, then one row per tag sorted by descending mean score.
print('{word}\t{count}\t${mean}\pm{std}$'.format(mean='mean', std='std', count='count', word='word'))
cat = 'pos'
dashes = (14 - int(len(cat) / 2)) * '-'
print(dashes, cat, dashes)
row_fmt = '{word}\t& {count}\t& ${mean:0.2f} \pm {std:0.2f}$ \\\\'
for tag in sorted(pos_scores, key=lambda k: pos_scores[k][0], reverse=True):
    print(row_fmt.format(
        mean=pos_scores[tag][0],
        std=pos_scores[tag][1],
        count=pos_scores[tag][2],
        word=tag,
    ))
# Same table, one section per word category. Note: sorts each category's
# word list in place, mutating the lists held in `categories`.
row_fmt = '{word}\t& {count}\t& ${mean:0.2f} \pm {std:0.2f}$ \\\\'
for cat, words in categories.items():
    dashes = (14 - int(len(cat) / 2)) * '-'
    print(dashes, cat, dashes)
    # in-place sort by descending mean score
    words.sort(key=lambda word: vocabs_scores[word][0], reverse=True)
    for word in words:
        # skip words that were never observed
        if vocabs_scores[word][2] == 0:
            continue
        print(row_fmt.format(
            mean=vocabs_scores[word][0],
            std=vocabs_scores[word][1],
            count=vocabs_scores[word][2],
            word=word,
        ))
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.