repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
suriyan/ethnicolr | ethnicolr/examples/ethnicolr_app_contrib2010.ipynb | mit | import pandas as pd
df = pd.read_csv('/opt/names/fec_contrib/contribDB_2010.csv.zip', nrows=100)
df.columns
"""
Explanation: Application: Illustrating the use of the package by imputing the race of the campaign contributors recorded by FEC for the years 2000 and 2010
a) what proportion of contributors were black, whites, hispanics, asian etc.
b) and proportion of total donation given by blacks, hispanics, whites, and asians.
c) get amount contributed by people of each race and divide it by total amount contributed.
End of explanation
"""
df = pd.read_csv('/opt/names/fec_contrib/contribDB_2010.csv.zip', usecols=['date', 'amount', 'contributor_type', 'contributor_lname', 'contributor_fname', 'contributor_name'])
df
#sdf = df[df.contributor_type=='I'].sample(1000)
sdf = df[df.contributor_type=='I'].copy()
sdf
from clean_names import clean_name
def do_clean_name(n):
    """Coerce *n* to a string (names may arrive as NaN/floats) and clean it."""
    return clean_name(str(n))
#sdf['clean_name'] = sdf['contributor_name'].apply(lambda c: do_clean_name(c))
#sdf
from ethnicolr import census_ln, pred_census_ln
rdf = pred_census_ln(sdf, 'contributor_lname', 2010)
rdf
#rdf.to_csv('output-pred-contrib2010-ln.csv', index_label='idx')
"""
Explanation: amount, date, contributor_name, contributor_lname, contributor_fname, contributor_type == 'I'
End of explanation
"""
adf = rdf.groupby(['race']).agg({'contributor_lname': 'count'})
adf *100 / adf.sum()
"""
Explanation: a) what proportion of contributors were black, whites, hispanics, asian etc.
End of explanation
"""
bdf = rdf.groupby(['race']).agg({'amount': 'sum'})
bdf * 100 / bdf.sum()
"""
Explanation: b) and proportion of total donation given by blacks, hispanics, whites, and asians.
End of explanation
"""
contrib_white = sum(rdf.amount * rdf.white)
contrib_black = sum(rdf.amount * rdf.black)
contrib_api = sum(rdf.amount * rdf.api)
contrib_hispanic = sum(rdf.amount * rdf.hispanic)
contrib_amount = [{'race': 'white', 'amount': contrib_white},
{'race': 'black', 'amount': contrib_black},
{'race': 'api', 'amount': contrib_api},
{'race': 'hispanic', 'amount': contrib_hispanic}]
contrib_df = pd.DataFrame(contrib_amount, columns=['race', 'amount'])
contrib_df.amount /= 10e6
contrib_df.columns = ['race', 'amount($1M)']
contrib_df
contrib_df.set_index('race', inplace=True, drop=True)
contrib_df.columns = ['% amount']
contrib_df * 100 / contrib_df.sum()
"""
Explanation: c) get amount contributed by people of each race and divide it by total amount contributed.
End of explanation
"""
|
omoju/udacityUd120Lessons | Text Learning.ipynb | gpl-3.0 | from_sara = open('../text_learning/from_sara.txt', "r")
from_chris = open('../text_learning/from_chris.txt', "r")
from_data = []
word_data = []
from nltk.stem.snowball import SnowballStemmer
import string
filePath = '/Users/omojumiller/mycode/hiphopathy/HipHopDataExploration/JayZ/'
f = open(filePath+"JayZ_American Gangster_American Gangster.txt", "r")
f.seek(0) ### go back to beginning of file (annoying)
all_text = f.read()
content = all_text.split("X-FileName:")
words = ""
stemmer = SnowballStemmer("english")
text_string = content
for sentence in text_string:
words = sentence.split()
stemmed_words = [stemmer.stem(word) for word in words]
def parseOutText(f):
    """ given an opened email file f, parse out all text below the
    metadata block at the top
    example use case:
    f = open("email_file_name.txt", "r")
    text = parseOutText(f)

    Returns the stemmed message body as a single space-joined string,
    or "" if the "X-FileName:" metadata marker is not found.
    """
    stemmer = SnowballStemmer("english")
    f.seek(0) ### go back to beginning of file (annoying)
    all_text = f.read()
    ### split off metadata
    # Everything after the first "X-FileName:" header is the message body.
    content = all_text.split("X-FileName:")
    words = ""
    if len(content) > 1:
        ### remove punctuation
        # NOTE: Python 2 API — str.translate(table, deletechars) with an
        # identity table deletes every punctuation character in one pass.
        # (The Python 3 equivalent would be
        #  content[1].translate(str.maketrans("", "", string.punctuation)).)
        text_string = content[1].translate(string.maketrans("", ""), string.punctuation)
        ### split the text string into individual words, stem each word,
        ### and append the stemmed word to words (make sure there's a single
        ### space between each stemmed word)
        words = ' '.join([stemmer.stem(word) for word in text_string.split()])
    return words
ff = open("../text_learning/test_email.txt", "r")
text = parseOutText(ff)
print text
"""
Explanation: The list of all the emails from Sara are in the from_sara list likewise for emails from Chris (from_chris).
The actual documents are in the Enron email dataset, which you downloaded/unpacked. The data is stored in lists and packed away in pickle files at the end.
End of explanation
"""
# Walk every email listed for each author, extract and stem its text with
# parseOutText, scrub signature words that would trivially identify the
# author, and build two parallel lists: word_data (documents) and
# from_data (labels: 0 = Sara, 1 = Chris).
# NOTE(review): relies on `os`, `from_sara`, `from_chris`, `word_data`,
# and `from_data` being defined earlier in the notebook.
temp_counter = 1
for name, from_person in [("sara", from_sara), ("chris", from_chris)]:
    for path in from_person:
        ### only look at first 200 emails when developing
        ### once everything is working, remove this line to run over full dataset
        #temp_counter += 1
        if temp_counter:
            # Each index line is a relative path with a trailing newline;
            # strip the newline and resolve relative to the parent directory.
            path = os.path.join('..', path[:-1])
            #print path
            email = open(path, "r")
            ### use parseOutText to extract the text from the opened email
            text = parseOutText(email)
            ### use str.replace() to remove any instances of the words
            # Remove author-identifying signature tokens so the classifier
            # cannot cheat by keying on names.
            replaceWords = ["sara", "shackleton", "chris", "germani"]
            for word in replaceWords:
                text = text.replace(word, '')
            ### append the text to word_data
            word_data.append(text)
            ### append a 0 to from_data if email is from Sara, and 1 if email is from Chris
            if name == "sara":
                from_data.append(0)
            else:
                from_data.append(1)
            email.close()
print "emails processed"
len(word_data)
"""
Explanation: temp_counter is a way to speed up the development--there are thousands of emails from Sara and Chris, so running over all of them can take a long time. temp_counter helps you only look at the first 200 emails in the list so you can iterate your modifications quicker
End of explanation
"""
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(stop_words="english",lowercase=True)
bag_of_words = vectorizer.fit(word_data)
"""
Explanation: TfIdf
Tf Term Frequency
Idf Inverse document frequency
End of explanation
"""
len(vectorizer.get_feature_names())
"""
Explanation: How many different words are there?
End of explanation
"""
vectorizer.get_feature_names()[34597]
"""
Explanation: What is word number 34597 in your TfIdf?
End of explanation
"""
|
camillescott/boink | notebooks/LabeledLinearAssembler_review.ipynb | mit | K = 21
graph = khmer.Countgraph(K, 1e6, 4)
labeller = khmer._GraphLabels(graph)
graph.consume(contig)
bubble = mutate_position(contig, 100)
reads = list(itertools.chain(reads_from_sequence(contig), reads_from_sequence(bubble)))
random.shuffle(reads)
for n, read in enumerate(reads):
graph.consume(read)
hdns = graph.find_high_degree_nodes(read)
labeller.label_across_high_degree_nodes(read, hdns, n)
paths = labeller.assemble_labeled_path(contig[:K])
print(*[str(len(p)) + ' ' + p for p in paths], sep='\n\n')
"""
Explanation: With the NaiveLabeledAssembler, we expect this configuration to produce only one shortened contig: we get two possible branches with spanning reads, neither of which has coverage of 1 (being a putative error / tip).
End of explanation
"""
K = 21
graph = khmer.Countgraph(K, 1e6, 4)
labeller = khmer._GraphLabels(graph)
bubble = mutate_position(contig, 100)
reads = itertools.chain(reads_from_sequence(contig, N=50),
[bubble],
reads_from_sequence(contig, N=50))
for n, read in enumerate(reads):
graph.consume(read)
hdns = graph.find_high_degree_nodes(read)
if list(hdns):
print('Read:', n)
print([str_tag(h, K) for h in hdns])
labeller.label_across_high_degree_nodes(read, hdns, n)
paths = labeller.assemble_labeled_path(contig[:K])
print(*[str(len(p)) + ' ' + p for p in paths], sep='\n\n')
nodegraph = khmer.Nodegraph(K, 1e5, 4)
lh = khmer._GraphLabels(nodegraph)
nodegraph.consume(contig)
branch = contig[:120] + 'TGATGGACAG'
nodegraph.consume(branch) # will add a branch
hdn = nodegraph.find_high_degree_nodes(contig)
hdn += nodegraph.find_high_degree_nodes(branch)
print(list(hdn))
lh.label_across_high_degree_nodes(contig, hdn, 1)
lh.label_across_high_degree_nodes(branch, hdn, 2)
print(lh.get_tag_labels(list(hdn)[0]))
paths = lh.assemble_labeled_path(contig[:K])
print([len(x) for x in paths])
len_path = len(paths)
print('len path:', len_path)
"""
Explanation: Let's try introducing an error in a single read. This should trip the filter and cause one full-length contig to be produced, so long as the coverage at the branch is greater than the arbitrarily selected minimum.
End of explanation
"""
|
jrg365/gpytorch | examples/03_Multitask_Exact_GPs/Multitask_GP_Regression.ipynb | mit | import math
import torch
import gpytorch
from matplotlib import pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
"""
Explanation: Multitask GP Regression
Introduction
Multitask regression, introduced in this paper learns similarities in the outputs simultaneously. It's useful when you are performing regression on multiple functions that share the same inputs, especially if they have similarities (such as being sinusodial).
Given inputs $x$ and $x'$, and tasks $i$ and $j$, the covariance between two datapoints and two tasks is given by
$$ k([x, i], [x', j]) = k_\text{inputs}(x, x') * k_\text{tasks}(i, j)
$$
where $k_\text{inputs}$ is a standard kernel (e.g. RBF) that operates on the inputs.
$k_\text{task}$ is a lookup table containing inter-task covariance.
End of explanation
"""
train_x = torch.linspace(0, 1, 100)
train_y = torch.stack([
torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,
torch.cos(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,
], -1)
"""
Explanation: Set up training data
In the next cell, we set up the training data for this example. We'll be using 100 regularly spaced points on [0,1] which we evaluate the function on and add Gaussian noise to get the training labels.
We'll have two functions - a sine function (y1) and a cosine function (y2).
For MTGPs, our train_targets will actually have two dimensions: with the second dimension corresponding to the different tasks.
End of explanation
"""
class MultitaskGPModel(gpytorch.models.ExactGP):
    """Exact GP for two correlated outputs.

    Uses a MultitaskMean (one constant mean per task) and a MultitaskKernel,
    i.e. k([x, i], [x', j]) = k_RBF(x, x') * k_task(i, j) with a rank-1
    inter-task covariance.
    """

    def __init__(self, train_x, train_y, likelihood):
        super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
        # One constant mean function per task.
        self.mean_module = gpytorch.means.MultitaskMean(
            gpytorch.means.ConstantMean(), num_tasks=2
        )
        # Data kernel (RBF) crossed with a rank-1 task covariance lookup.
        self.covar_module = gpytorch.kernels.MultitaskKernel(
            gpytorch.kernels.RBFKernel(), num_tasks=2, rank=1
        )

    def forward(self, x):
        # Return the joint multitask Gaussian over all tasks at inputs x.
        return gpytorch.distributions.MultitaskMultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )
likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
model = MultitaskGPModel(train_x, train_y, likelihood)
"""
Explanation: Define a multitask model
The model should be somewhat similar to the ExactGP model in the simple regression example.
The differences:
We're going to wrap ConstantMean with a MultitaskMean. This makes sure we have a mean function for each task.
Rather than just using a RBFKernel, we're using that in conjunction with a MultitaskKernel. This gives us the covariance function described in the introduction.
We're using a MultitaskMultivariateNormal and MultitaskGaussianLikelihood. This allows us to deal with the predictions/outputs in a nice way. For example, when we call MultitaskMultivariateNormal.mean, we get a n x num_tasks matrix back.
You may also notice that we don't use a ScaleKernel, since the IndexKernel will do some scaling for us. (This way we're not overparameterizing the kernel.)
End of explanation
"""
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iterations = 2 if smoke_test else 50
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(training_iterations):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y)
loss.backward()
print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.item()))
optimizer.step()
"""
Explanation: Train the model hyperparameters
End of explanation
"""
# Set into eval mode
model.eval()
likelihood.eval()
# Initialize plots
f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))
# Make predictions
with torch.no_grad(), gpytorch.settings.fast_pred_var():
test_x = torch.linspace(0, 1, 51)
predictions = likelihood(model(test_x))
mean = predictions.mean
lower, upper = predictions.confidence_region()
# This contains predictions for both tasks, flattened out
# The first half of the predictions is for the first task
# The second half is for the second task
# Plot training data as black stars
y1_ax.plot(train_x.detach().numpy(), train_y[:, 0].detach().numpy(), 'k*')
# Predictive mean as blue line
y1_ax.plot(test_x.numpy(), mean[:, 0].numpy(), 'b')
# Shade in confidence
y1_ax.fill_between(test_x.numpy(), lower[:, 0].numpy(), upper[:, 0].numpy(), alpha=0.5)
y1_ax.set_ylim([-3, 3])
y1_ax.legend(['Observed Data', 'Mean', 'Confidence'])
y1_ax.set_title('Observed Values (Likelihood)')
# Plot training data as black stars
y2_ax.plot(train_x.detach().numpy(), train_y[:, 1].detach().numpy(), 'k*')
# Predictive mean as blue line
y2_ax.plot(test_x.numpy(), mean[:, 1].numpy(), 'b')
# Shade in confidence
y2_ax.fill_between(test_x.numpy(), lower[:, 1].numpy(), upper[:, 1].numpy(), alpha=0.5)
y2_ax.set_ylim([-3, 3])
y2_ax.legend(['Observed Data', 'Mean', 'Confidence'])
y2_ax.set_title('Observed Values (Likelihood)')
None
"""
Explanation: Make predictions with the model
End of explanation
"""
|
meli-lewis/pygotham2015 | jupyter_panda.ipynb | mit | from __future__ import division
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import rpy2
from IPython.display import display, Image, YouTubeVideo
%matplotlib inline
"""
Explanation: Introduction to data munging with Jupyter and pandas
PyGotham 2015
End of explanation
"""
pd.re
from math import
"""
Explanation: The case for open source data tools
Reproducibility and Transparency
Cost -- compare capabilities between software you already use and open source here
Allows a diversity of platforms on a given team
The case for notebooks
They're amenable to sketching, and they're amenable to reproducibility.
You can retrace your own steps and also make a narrative for someone else to follow.
Built-in documentation improves workflow; magic methods anticipate data analysis needs.
IPython/Jupyter Notebook: some cool tips for beginners
1. holy moly tab completion
End of explanation
"""
?pd.read_csv
"""
Explanation: 2. built-in documentation
shift-tab brings up brief function documentation
End of explanation
"""
%quickref
%lsmagic
%load_ext rmagic
%R x <- c(0:10, 50)
%R xm <- mean(x)
%R c(xm, mean(x, trim = 0.10))
"""
Explanation: 3. markup!
Markdown and HTML
<img src='http://i.imgur.com/WypQf94.gif' align='left'></span>
<br>
<br>
<br>
LaTeX
<span style="font-size: 24px"> $\bar{x} = \frac{\sum_{i=1}^{n}w_i\cdot x_i}{\sum_{i=1}^{n}w_i}$</span>
Syntax highlighting for other languages
R
x <- c(0:10, 50)
xm <- mean(x)
c(xm, mean(x, trim = 0.10))
4. magic methods
End of explanation
"""
YouTubeVideo("L4Hbv4ugUWk")
"""
Explanation: 5. multimedia
End of explanation
"""
!ls
"""
Explanation: 6. Sharing! Notebooks are now viewable directly on github, and also exportable as PDF or HTML.
7. Shell commands!
End of explanation
"""
?pd.read_csv()
# read in a CSV
# specify that zipcode should be treated as a string rather than an int!
AGI = pd.read_csv('12zpallagi.csv',dtype={'zipcode': str})
AGI.info()
"""
Explanation: 8. Keyboard shortcuts! Your implementation may vary.
Use what's here or roll your own.
pandas
Input
Source: IRS.gov
End of explanation
"""
# you can select columns by label or position!
AGI_column_subset = AGI[['STATE','AGI_STUB','zipcode','N1','A00100']]
# get information about type for a given field, and how many values you can expect for each
AGI_column_subset.info()
AGI_column_subset.describe()
# note this is inclusive!
AGI_row_subset = AGI_column_subset.ix[6:11]
AGI_row_subset
AGI_column_subset.rename(columns={'N1':'population','A00100':'amount'},inplace=True)
AGI_column_subset.head()
# group by zipcode and sum other values, resetting index
AGI_grouped = AGI_column_subset.groupby('zipcode').sum().reset_index()
AGI_grouped.head()
"""
Explanation: Transformation
End of explanation
"""
AGI_grouped['population'].mean()
#this can also be done using the na_values param upon being read in
null_zips = (AGI_grouped['zipcode'] == '00000')
AGI_grouped.loc[null_zips, 'zipcode'] = np.nan
AGI_grouped.head()
AGI_notnull = AGI_grouped.dropna()
AGI_notnull['population'].mean()
AGI_grouped.dropna(inplace=True)
# make a new column with the real amount, not in thousands
AGI_grouped['actual_amount'] = AGI_grouped['amount'] * 1000
"""
Explanation: A WILD ZIP CODE APPEARS!
<img src="http://ecdn.funzypics.com/grumpycatmemes/pics/16/OH-No--The-Cute-Kitty-Cats-Are-Attacking-Like-Aliens----The-Hunt-Is-On-Wow-Quest-Guide-.jpg" align="left">
Delete or render null? You decide!
End of explanation
"""
%timeit applied = AGI_grouped['amount'].apply(lambda x: x * 1000)
#being vectorized operations, this is happening at the C level and thereby much faster
%timeit vectorized = AGI_grouped['amount'] * 1000
AGI_grouped
# make a mean, using standard math operations!
AGI_grouped['weighted_mean_AGI'] = AGI_grouped['actual_amount']/AGI_grouped['population']
#use anonymous functions to change every value in a column!
#because this is an apply, much slower
AGI_grouped['weighted_mean_AGI']= AGI_grouped['weighted_mean_AGI'].apply(lambda x: round(x, 0))
AGI_grouped.info()
AGI_grouped.describe()
# drop columns you won't need
AGI_grouped.drop(['AGI_STUB','amount','actual_amount'],axis=1,inplace=True)
AGI_grouped.head()
"""
Explanation: Keep in mind you have options, and use magic methods to test implementation inline!
End of explanation
"""
# also look into pandas.Series.unique
AGI_subset_geo = AGI[['zipcode','STATE']].drop_duplicates()
AGI_subset_geo.head()
#merge rather than join if you want to use a common column other than the index
AGI_final = pd.merge(AGI_grouped, AGI_subset_geo, how='left', on='zipcode')
AGI_final.head()
# this gives you the greatest weighted_mean_AGI first
AGI_final.sort('weighted_mean_AGI',ascending=False).head()
# chain methods!
AGI_final.groupby('STATE').mean().sort('weighted_mean_AGI',ascending=False)
AGI_final.sort('weighted_mean_AGI').head().plot(kind='bar')
"""
Explanation: Merging! Better than in traffic!
Group by knows aggregating strings is nonsensical, and so drops those.
But let's add state information to the mix again!
End of explanation
"""
|
royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | ex04-Read nino3 SSTA series in npz format, plot and save the image.ipynb | mit | %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt # to generate plots
"""
Explanation: Read nino3 SSTA time series, Plot and Save the image
In this notebook, we will carry out the following operations
* read time series data produced bya previous notebook
* have a quick plot
* decorate plots
* save image
1. Load basic libraries
End of explanation
"""
npzfile = np.load('data/ssta.nino3.30y.npz')
npzfile.files
ssta_series = npzfile['ssta_series']
ssta_series.shape
"""
Explanation: 2. Load nino3 SSTA series
Please keep in mind that the nino3 SSTA series lies between 1970 and 1999 <br>
Recall ex2
End of explanation
"""
plt.plot(ssta_series)
"""
Explanation: 3. Have a quick plot
End of explanation
"""
fig = plt.figure(figsize=(15, 6))
ax = fig.add_subplot(111)
plt.plot(ssta_series, 'g-', linewidth=2)
plt.xlabel('Years')
plt.ylabel('[$^oC$]')
plt.title('nino3 SSTA 30-year (1970-1999)', fontsize=12)
ax.set_xlim(0,361)
ax.set_ylim(-3.5,3.5)
ax.set_xticklabels(range(1970,2000,1*4))
ax.axhline(0, color='r')
plt.grid(True)
ax.autoscale_view()
plt.savefig('image/ssta_series_30y.png')
"""
Explanation: 4. Make it beautiful
4.1 Add Year ticks and grid lines, etc.
More info can be found from https://matplotlib.org/users/pyplot_tutorial.html
End of explanation
"""
fig = plt.figure(figsize=(15, 6))
ax = fig.add_subplot(111)
xtime = np.linspace(1,360,360)
ssta_series = ssta_series.reshape((360))
ax.plot(xtime, ssta_series, 'black', alpha=1.00, linewidth=2)
ax.fill_between(xtime, 0., ssta_series, ssta_series> 0., color='red', alpha=.75)
ax.fill_between(xtime, 0., ssta_series, ssta_series< 0., color='blue', alpha=.75)
plt.xlabel('Years')
plt.ylabel('[$^oC$]')
plt.title('nino3 SSTA 30-year (1970-1999)', fontsize=12)
ax.set_xlim(0, 361)
ax.set_ylim(-4, 4)
ax.set_xticklabels(range(1970,2000,1*4))
plt.grid(True)
ax.autoscale_view()
"""
Explanation: 4.2 More professional
Just like the image from https://www.esrl.noaa.gov/psd/enso/mei/
End of explanation
"""
|
arnoldlu/lisa | ipynb/examples/devlib/cgroups_example.ipynb | apache-2.0 | import logging
from conf import LisaLogging
LisaLogging.setup()
import os
import json
import operator
import devlib
import trappy
import bart
from bart.sched.SchedMultiAssert import SchedMultiAssert
from wlgen import RTA, Periodic
"""
Explanation: Cgroups
cgroups (abbreviated from control groups) is a Linux kernel feature that limits, accounts for, and isolates the resource usage (CPU, memory, disk I/O, network, etc.) of a collection of processes.
A control group is a collection of processes that are bound by the same criteria and associated with a set of parameters or limits. These groups can be hierarchical, meaning that each group inherits limits from its parent group. The kernel provides access to multiple controllers (also called subsystems) through the cgroup interface, for example, the "memory" controller limits memory use, "cpuacct" accounts CPU usage, etc.
End of explanation
"""
from env import TestEnv
my_conf = {
# Android Pixel
"platform" : "android",
"board" : "pixel",
"device" : "HT6670300102",
"ANDROID_HOME" : "/home/vagrant/lisa/tools/android-sdk-linux/",
"exclude_modules" : [ "hwmon" ],
# List of additional devlib modules to install
"modules" : ['cgroups', 'bl', 'cpufreq'],
# List of additional binary tools to install
"tools" : ['rt-app', 'trace-cmd'],
# FTrace events to collect
"ftrace" : {
"events" : [
"sched_switch"
],
"buffsize" : 10240
}
}
te = TestEnv(my_conf, force_new=True)
target = te.target
# Report target connection
logging.info('Connected to %s target', target.abi)
print "DONE"
"""
Explanation: Target Configuration
The target configuration is used to describe and configure your test environment.
You can find more details in examples/utils/testenv_example.ipynb.
End of explanation
"""
logging.info('%14s - Available controllers:', 'CGroup')
ssys = target.cgroups.list_subsystems()
for (n,h,g,e) in ssys:
logging.info('%14s - %10s (hierarchy id: %d) has %d cgroups',
'CGroup', n, h, g)
"""
Explanation: List available Controllers
Details on the available controllers (or subsystems) can be found at: https://www.kernel.org/doc/Documentation/cgroup-v1/.
End of explanation
"""
# Get a reference to the CPUSet controller
cpuset = target.cgroups.controller('cpuset')
# Get the list of current configured CGroups for that controller
cgroups = cpuset.list_all()
logging.info('Existing CGropups:')
for cg in cgroups:
logging.info(' %s', cg)
# Dump the configuraiton of each controller
for cgname in cgroups:
#print cgname
cgroup = cpuset.cgroup(cgname)
attrs = cgroup.get()
#print attrs
cpus = attrs['cpus']
logging.info('%s:%-15s cpus: %s', cpuset.kind, cgroup.name, cpus)
# Create a LITTLE partition
cpuset_littles = cpuset.cgroup('/LITTLE')
# Check the attributes available for this control group
print "LITTLE:\n", json.dumps(cpuset_littles.get(), indent=4)
# Tune CPUs and MEMs attributes
# they must be initialize for the group to be usable
cpuset_littles.set(cpus=target.bl.littles, mems=0)
print "LITTLE:\n", json.dumps(cpuset_littles.get(), indent=4)
# Define a periodic big (80%) task
task = Periodic(
period_ms=100,
duty_cycle_pct=80,
duration_s=5).get()
# Create one task per each CPU in the target
tasks={}
for tid in enumerate(target.core_names):
tasks['task{}'.format(tid[0])] = task
# Configure RTA to run all these tasks
rtapp = RTA(target, 'simple', calibration=te.calibration())
rtapp.conf(kind='profile', params=tasks, run_dir=target.working_directory);
# Test execution of all these tasks into the LITTLE cluster
trace = rtapp.run(ftrace=te.ftrace, cgroup=cpuset_littles.name, out_dir=te.res_dir)
# Check tasks residency on little cluster
trappy.plotter.plot_trace(trace)
# Compute and visualize tasks residencies on LITTLE clusterh CPUs
s = SchedMultiAssert(trappy.FTrace(trace), te.topology, execnames=tasks.keys())
residencies = s.getResidency('cluster', target.bl.littles, percent=True)
print json.dumps(residencies, indent=4)
# Assert that ALL tasks have always executed only on LITTLE cluster
s.assertResidency('cluster', target.bl.littles,
99.9, operator.ge, percent=True, rank=len(residencies))
"""
Explanation: Example of CPUSET controller usage
Cpusets provide a mechanism for assigning a set of CPUs and memory nodes to a set of tasks. Cpusets constrain the CPU and memory placement of tasks to only the resources available within a task's current cpuset. They form a nested hierarchy visible in a virtual file system. These are the essential hooks, beyond what is already present, required to manage dynamic job placement on large systems.
More information can be found in the kernel documentation: https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt.
End of explanation
"""
# Get a reference to the CPU controller
cpu = target.cgroups.controller('cpu')
# Create a big partition on that CPUS
cpu_littles = cpu.cgroup('/LITTLE')
# Check the attributes available for this control group
print "LITTLE:\n", json.dumps(cpu_littles.get(), indent=4)
# Set a 1CPU equivalent bandwidth for that CGroup
# cpu_littles.set(cfs_period_us=100000, cfs_quota_us=50000)
cpu_littles.set(shares=512)
print "LITTLE:\n", json.dumps(cpu_littles.get(), indent=4)
# Test execution of all these tasks into the LITTLE cluster
trace = rtapp.run(ftrace=te.ftrace, cgroup=cpu_littles.name)
# Check tasks residency on little cluster
trappy.plotter.plot_trace(trace)
"""
Explanation: Example of CPU controller usage
While the CPUSET is a controller to assign CPUs and memory nodes for a set of tasks, the CPU controller is used to assign CPU bandwidth.
End of explanation
"""
# Isolate CPU0
# This works by moving all user-space tasks into a cpuset
# which does not include the specified list of CPUs to be
# isolated.
sandbox, isolated = target.cgroups.isolate(cpus=[0])
# Check the attributes available for the SANDBOX group
print "Sandbox:\n", json.dumps(sandbox.get(), indent=4)
# Check the attributes available for the ISOLATED group
print "Isolated:\n", json.dumps(isolated.get(), indent=4)
# Run some workload, which is expected to not run in the ISOLATED cpus:
trace = rtapp.run(ftrace=te.ftrace)
# Check tasks was not running on ISOLATED CPUs
trappy.plotter.plot_trace(trace)
# Compute and visualize tasks residencies on ISOLATED CPUs
s = SchedMultiAssert(trappy.FTrace(trace), te.topology, execnames=tasks.keys())
residencies = s.getResidency('cpu', [0], percent=True)
print json.dumps(residencies, indent=4)
# Assert that ISOLATED CPUs was not running workload tasks
s.assertResidency('cpu', [0], 0.0, operator.eq, percent=True, rank=len(residencies))
"""
Explanation: Example of CPUs isolation
End of explanation
"""
|
HaFl/ufldl-tutorial-python | Gradient_Checking.ipynb | mit | data_original = np.loadtxt('stanford_dl_ex/ex1/housing.data')
data = np.insert(data_original, 0, 1, axis=1)
np.random.shuffle(data)
train_X = data[:400, :-1]
train_y = data[:400, -1]
m, n = train_X.shape
theta = np.random.rand(n)
"""
Explanation: Preparation (Based on Linear Regression)
Prepare train and test data.
End of explanation
"""
def cost_function(theta, X, y):
    """Return the sum-of-squared-errors cost J(theta) = 0.5 * sum((X@theta - y)^2)."""
    residuals = X.dot(theta) - y
    return 0.5 * (residuals ** 2).sum()
def gradient(theta, X, y):
    """Return the gradient of the squared-error cost w.r.t. theta: (X@theta - y) @ X."""
    residuals = X.dot(theta) - y
    return residuals.dot(X)
"""
Explanation: Define some necessary functions.
End of explanation
"""
epsilon = 1e-4
"""
Explanation: Gradient Checking
Define "step size" (don't set it too low to avoid numerical precision issues).
End of explanation
"""
mask = np.identity(theta.size)
theta_plus = theta + epsilon * mask
theta_minus = theta - epsilon * mask
"""
Explanation: Prepare theta step values (making use of numpy broadcasting).
End of explanation
"""
diffs = np.empty_like(theta)
for i in range(theta_plus.shape[0]):
gradient_def = (
(cost_function(theta_plus[i], train_X, train_y) - cost_function(theta_minus[i], train_X, train_y)) /
(2 * epsilon)
)
gradient_lin_reg = gradient(theta, train_X, train_y)[i]
diffs[i] = np.absolute(gradient_def - gradient_lin_reg)
diffs
"""
Explanation: Compute diffs between theta's gradient as mathematically defined and the gradient as defined by our function above.
End of explanation
"""
assert all(np.less(diffs, 1e-4))
"""
Explanation: Lookin' good! The smaller the values, the better.<br>
(Any value significantly larger than 1e-4 indicates a problem.)
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion | notebooks/bigquery/solutions/c_extract_and_benchmark.ipynb | apache-2.0 | import pandas as pd
from google.cloud import bigquery
PROJECT = !gcloud config get-value project
PROJECT = PROJECT[0]
%env PROJECT=$PROJECT
"""
Explanation: Extract Datasets and Establish Benchmark
Learning Objectives
- Divide into Train, Evaluation and Test datasets
- Understand why we need each
- Pull data out of BigQuery and into CSV
- Establish Rules Based Benchmark
Introduction
In the previous notebook we demonstrated how to do ML in BigQuery. However BQML is limited to linear models.
For advanced ML we need to pull the data out of BigQuery and load it into a ML Framework, in our case TensorFlow.
While TensorFlow can read from BigQuery directly, the performance is slow. The best practice is to first stage the BigQuery files as .csv files, and then read the .csv files into TensorFlow.
The .csv files can reside on local disk if we're training locally, but if we're training in the cloud we'll need to move the .csv files to the cloud, in our case Google Cloud Storage.
Set up environment variables and load necessary libraries
End of explanation
"""
def create_query(phase, sample_size):
    """Build a BigQuery SQL query for one split of the NYC taxi fare data.

    The split is made repeatable by hashing pickup_datetime with
    FARM_FINGERPRINT, so the same rows always land in the same set:
    TRAIN gets 70%, VALID 15%, TEST 15% of the sampled rows.

    Args:
        phase: One of "TRAIN", "VALID", or "TEST".
        sample_size: Sampling modulus as a string (e.g. "5000" keeps roughly
            1/5000th of the rows); substituted for every EVERY_N placeholder.

    Returns:
        The completed SQL query string.

    Raises:
        ValueError: If phase is not one of the recognized split names.
            (The original code raised an UnboundLocalError in this case.)
    """
    basequery = """
    SELECT
    (tolls_amount + fare_amount) AS fare_amount,
    EXTRACT(DAYOFWEEK from pickup_datetime) AS dayofweek,
    EXTRACT(HOUR from pickup_datetime) AS hourofday,
    pickup_longitude AS pickuplon,
    pickup_latitude AS pickuplat,
    dropoff_longitude AS dropofflon,
    dropoff_latitude AS dropofflat
    FROM
    `nyc-tlc.yellow.trips`
    WHERE
    trip_distance > 0
    AND fare_amount >= 2.5
    AND pickup_longitude > -78
    AND pickup_longitude < -70
    AND dropoff_longitude > -78
    AND dropoff_longitude < -70
    AND pickup_latitude > 37
    AND pickup_latitude < 45
    AND dropoff_latitude > 37
    AND dropoff_latitude < 45
    AND passenger_count > 0
    AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N)) = 1
    """
    if phase == "TRAIN":
        # First 70% of the hash buckets.
        subsample = """
        AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100)) >= (EVERY_N * 0)
        AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100)) < (EVERY_N * 70)
        """
    elif phase == "VALID":
        # Next 15% of the hash buckets.
        subsample = """
        AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100)) >= (EVERY_N * 70)
        AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100)) < (EVERY_N * 85)
        """
    elif phase == "TEST":
        # Final 15% of the hash buckets.
        subsample = """
        AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100)) >= (EVERY_N * 85)
        AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), EVERY_N * 100)) < (EVERY_N * 100)
        """
    else:
        raise ValueError(
            "phase must be 'TRAIN', 'VALID' or 'TEST', got %r" % (phase,)
        )
    query = basequery + subsample
    return query.replace("EVERY_N", sample_size)
"""
Explanation: Review
In the a_sample_explore_clean notebook we came up with the following query to extract a repeatable and clean sample:
<pre>
#standardSQL
SELECT
(tolls_amount + fare_amount) AS fare_amount, -- label
pickup_datetime,
pickup_longitude,
pickup_latitude,
dropoff_longitude,
dropoff_latitude
FROM
`nyc-tlc.yellow.trips`
WHERE
-- Clean Data
trip_distance > 0
AND passenger_count > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
-- repeatable 1/5000th sample
AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 5000)) = 1
</pre>
We will use the same query with one change. Instead of using pickup_datetime as is, we will extract dayofweek and hourofday from it. This is to give us some categorical features in our dataset so we can illustrate how to deal with them when we get to feature engineering. The new query will be:
<pre>
SELECT
(tolls_amount + fare_amount) AS fare_amount, -- label
EXTRACT(DAYOFWEEK from pickup_datetime) AS dayofweek,
EXTRACT(HOUR from pickup_datetime) AS hourofday,
pickup_longitude,
pickup_latitude,
dropoff_longitude,
dropoff_latitude
-- rest same as before
</pre>
Split into train, evaluation, and test sets
For ML modeling we need not just one, but three datasets.
Train: This is what our model learns on
Evaluation (aka Validation): We shouldn't evaluate our model on the same data we trained on because then we couldn't know whether it was memorizing the input data or whether it was generalizing. Therefore we evaluate on the evaluation dataset, aka validation dataset.
Test: We use our evaluation dataset to tune our hyperparameters (we'll cover hyperparameter tuning in a future lesson). We need to know that our chosen set of hyperparameters will work well for data we haven't seen before because in production, that will be the case. For this reason, we create a third dataset that we never use during the model development process. We only evaluate on this once our model development is finished. Data scientists don't always create a test dataset (aka holdout dataset), but to be thorough you should.
We can divide our existing 1/5000th sample three ways 70%/15%/15% (or whatever split we like) with some modulo math demonstrated below.
Because we are using a hash function these results are deterministic, we'll get the same exact split every time the query is run (assuming the underlying data hasn't changed)
End of explanation
"""
# Export each split (train/valid/test) from BigQuery to a local CSV file.
# Assumes `bigquery` (google.cloud.bigquery) and PROJECT are defined in an
# earlier notebook cell -- TODO confirm.
bq = bigquery.Client(project=PROJECT)
for phase in ["TRAIN", "VALID", "TEST"]:
    # 1. Create query string
    query_string = create_query(phase, "5000")  # 1/5000th repeatable sample
    # 2. Load results into DataFrame
    df = bq.query(query_string).to_dataframe()
    # 3. Write DataFrame to CSV (omit the index so the file has data columns only)
    df.to_csv(f"taxi-{phase.lower()}.csv", index_label=False, index=False)
    print("Wrote {} lines to {}".format(len(df), f"taxi-{phase.lower()}.csv"))
"""
Explanation: Write to CSV
Now let's execute a query for train/valid/test and write the results to disk in csv format. We use Pandas's .to_csv() method to do so.
End of explanation
"""
!ls -l *.csv
"""
Explanation: Note that even with a 1/5000th sample we have a good amount of data for ML. 150K training examples and 30K validation.
Verify that datasets exist
End of explanation
"""
!head taxi-train.csv
"""
Explanation: Preview one of the files
End of explanation
"""
def euclidean_distance(df):
    """Straight-line (Euclidean) distance, in degrees, between the
    pickup and dropoff coordinates of each row."""
    delta_lat = df["pickuplat"] - df["dropofflat"]
    delta_lon = df["pickuplon"] - df["dropofflon"]
    return (delta_lat ** 2 + delta_lon ** 2) ** 0.5
def compute_rmse(actual, predicted):
    """Root mean squared error between two aligned series."""
    squared_errors = (actual - predicted) ** 2
    return squared_errors.mean() ** 0.5
def print_rmse(df, rate, name):
    """Print the RMSE of the distance-rate benchmark on one dataset.

    Predicted fare = rate * euclidean_distance(df); compared against the
    actual ``fare_amount`` column of *df*. *name* labels the dataset
    ("Train", "Valid", ...) in the printed line.
    """
    # Bug fix: the name and the RMSE were swapped in the .format() call,
    # printing e.g. "9.41 RMSE = Train" instead of "Train RMSE = 9.41".
    print(
        "{} RMSE = {}".format(
            name,
            compute_rmse(df["fare_amount"], rate * euclidean_distance(df)),
        )
    )
# Fit the benchmark rate (mean fare per unit of euclidean "degree distance")
# on the training split, then report the RMSE on both train and validation.
df_train = pd.read_csv("taxi-train.csv")
df_valid = pd.read_csv("taxi-valid.csv")
rate = df_train["fare_amount"].mean() / euclidean_distance(df_train).mean()
print_rmse(df_train, rate, "Train")
print_rmse(df_valid, rate, "Valid")
"""
Explanation: Looks good! We now have our ML datasets and are ready to train ML models, validate them and test them.
Establish rules-based benchmark
Before we start building complex ML models, it is a good idea to come up with a simple rules based model and use that as a benchmark. After all, there's no point using ML if it can't beat the traditional rules based approach!
Our rule is going to be to divide the mean fare_amount by the mean estimated distance to come up with a rate and use that to predict.
Recall we can't use the actual trip_distance because we won't have that available at prediction time (depends on the route taken), however we do know the users pick up and drop off location so we can use euclidean distance between those coordinates.
End of explanation
"""
|
f-guitart/data_mining | notes/96 - Summary - Indexing and Apllying Functions.ipynb | gpl-3.0 | import pandas as pd
import numpy as np
# Toy frame of random normals used throughout the indexing examples below.
df = pd.DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df["A"] #indexing operator: returns the column as a Series
df.A #attribute access: same Series
type(df.A)
df.A[0]
# A list of labels selects several columns and returns a DataFrame.
df[["A","B"]]
type(df[["A","B"]])
"""
Explanation: Series/Dataframes slicing and function application
Slicing
(from panda docs: https://pandas.pydata.org/pandas-docs/stable/indexing.html)
The axis labeling information in pandas objects serves many purposes:
Identifies data (i.e. provides metadata) using known indicators, important for analysis, visualization, and interactive console display
Enables automatic and explicit data alignment
Allows intuitive getting and setting of subsets of the data set
indexing operators: []
attribute operators: .
Example:
End of explanation
"""
# Positional slicing on a Series works like Python list slicing.
s = df["A"]
s[:5]    # first five elements
s[::2]   # every other element
s[::-1]  # reversed
"""
Explanation: Slicing ranges
The most robust way to slice Dataframes is by using .loc and .iloc methods, however the following also holds:
End of explanation
"""
df[:3] # for convenience as it is a common use
df["A"]
"""
Explanation: Watch out... this is a rather inconsistent use of the indexing operator over rows. That's why it is said that .loc provides a more consistent interface.
End of explanation
"""
# .loc is label based: rows first, then columns; label slices include
# both endpoints (unlike positional slices).
df.loc[:,"B":]
# .loc also supports assignment: zero out columns C onward for row labels >= 4.
df.loc[4:,"C":] = 0
df
"""
Explanation: The .loc attribute is the primary access method. The following are valid inputs:
A single label, e.g. 5 or 'a', (note that 5 is interpreted as a label of the index. This use is not an integer position along the index)
A list or array of labels ['a', 'b', 'c']
A slice object with labels 'a':'f' (note that contrary to usual python slices, both the start and the stop are included!)
A boolean array
A callable, see Selection By Callable
End of explanation
"""
df.loc[:,"A"]>0
df.loc[df.loc[:,"A"]>0]
"""
Explanation: Boolean accessing
End of explanation
"""
df.iloc[0,0]
df.iloc[3:,2:]
df.iloc[[0,1,3],[1,3]]
"""
Explanation: Accessing by position by .iloc
End of explanation
"""
df.loc[:,lambda df: df.columns == "A"]
"""
Explanation: Selection by callable
End of explanation
"""
df["X"] = range(0, df.shape[0])
df
df[df["X"].isin([0,2])]
"""
Explanation: Selection by isin
End of explanation
"""
df.where(df["A"]>0)
df.where(df["A"]>0,100)
"""
Explanation: The where() Method
End of explanation
"""
# Arithmetic on a Series is vectorized element-wise.
s * 2
# Reductions: the Series method and the equivalent numpy function.
s.max()
np.max(s)
# apply calls the function on each element (np.max of a scalar is the scalar).
s.apply(np.max)
def multiply_by_2(x):
    """Double the given value; used below to demo Series.apply."""
    doubled = x * 2
    return doubled
# apply/map accept named functions and lambdas alike.
s.apply(multiply_by_2)
s.apply(lambda x: x*2)
s.map(lambda x: x*2)
# map can also translate values through a lookup dict.
mydict={2:"a"}
df["X"].map(mydict)
"""
Explanation: Applying Functions
Over series:
* Vectorized functions
* Apply/map
End of explanation
"""
# axis=0: apply the function to each column (one max per column).
df.apply(np.max,axis=0)
# axis=1: apply the function to each row (one max per row).
df.apply(np.max,axis=1)
# applymap applies the function element-wise over the whole frame.
df.applymap(lambda x: x*2)
"""
Explanation: Over dataframes:
* apply (we can decide which axis)
* applymap
End of explanation
"""
|
alorenzo175/pvlib-python | docs/tutorials/pvsystem.ipynb | bsd-3-clause | # built-in python modules
import os
import inspect
import datetime
# scientific python add-ons
import numpy as np
import pandas as pd
# plotting stuff
# first line makes the plots appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
# seaborn makes your plots look better
try:
import seaborn as sns
sns.set(rc={"figure.figsize": (12, 6)})
except ImportError:
print('We suggest you install seaborn using conda or pip and rerun this cell')
# finally, we import the pvlib library
import pvlib
"""
Explanation: pvsystem tutorial
This tutorial explores the pvlib.pvsystem module. The module has functions for importing PV module and inverter data and functions for modeling module and inverter performance.
systemdef
Angle of Incidence Modifiers
Sandia Cell Temp correction
Sandia Inverter Model
Sandia Array Performance Model
SAPM IV curves
DeSoto Model
Single Diode Model
This tutorial has been tested against the following package versions:
* pvlib 0.4.5
* Python 3.6.2
* IPython 6.0
* Pandas 0.20.1
It should work with other Python and Pandas versions. It requires pvlib >= 0.4.0 and IPython >= 3.0.
Authors:
* Will Holmgren (@wholmgren), University of Arizona. 2015, March 2016, November 2016, May 2017.
End of explanation
"""
import pvlib
from pvlib import pvsystem
"""
Explanation: systemdef
End of explanation
"""
pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(pvlib)))
tmy3_data, tmy3_metadata = pvlib.tmy.readtmy3(os.path.join(pvlib_abspath, 'data', '703165TY.csv'))
tmy2_data, tmy2_metadata = pvlib.tmy.readtmy2(os.path.join(pvlib_abspath, 'data', '12839.tm2'))
pvlib.pvsystem.systemdef(tmy3_metadata, 0, 0, .1, 5, 5)
pvlib.pvsystem.systemdef(tmy2_metadata, 0, 0, .1, 5, 5)
"""
Explanation: pvlib can import TMY2 and TMY3 data. Here, we import the example files.
End of explanation
"""
angles = np.linspace(-180,180,3601)
ashraeiam = pd.Series(pvsystem.ashraeiam(angles, .05), index=angles)
ashraeiam.plot()
plt.ylabel('ASHRAE modifier')
plt.xlabel('input angle (deg)')
angles = np.linspace(-180,180,3601)
physicaliam = pd.Series(pvsystem.physicaliam(angles), index=angles)
physicaliam.plot()
plt.ylabel('physical modifier')
plt.xlabel('input index')
plt.figure()
ashraeiam.plot(label='ASHRAE')
physicaliam.plot(label='physical')
plt.ylabel('modifier')
plt.xlabel('input angle (deg)')
plt.legend()
"""
Explanation: Angle of Incidence Modifiers
End of explanation
"""
# scalar inputs
pvsystem.sapm_celltemp(900, 5, 20) # irrad, wind, temp
# vector inputs
times = pd.DatetimeIndex(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
pvtemps = pvsystem.sapm_celltemp(irrads, winds, temps)
pvtemps.plot()
"""
Explanation: Sandia Cell Temp correction
PV system efficiency can vary by up to 0.5% per degree C, so it's important to accurately model cell and module temperature. The sapm_celltemp function uses plane of array irradiance, ambient temperature, wind speed, and module and racking type to calculate cell and module temperatures. From King et. al. (2004):
$$T_m = E e^{a+b*WS} + T_a$$
$$T_c = T_m + \frac{E}{E_0} \Delta T$$
The $a$, $b$, and $\Delta T$ parameters depend on the module and racking type. The default parameter set is open_rack_cell_glassback.
sapm_celltemp works with either scalar or vector inputs, but always returns a pandas DataFrame.
End of explanation
"""
wind = np.linspace(0,20,21)
temps = pd.DataFrame(pvsystem.sapm_celltemp(900, wind, 20), index=wind)
temps.plot()
plt.legend()
plt.xlabel('wind speed (m/s)')
plt.ylabel('temperature (deg C)')
"""
Explanation: Cell and module temperature as a function of wind speed.
End of explanation
"""
atemp = np.linspace(-20,50,71)
temps = pvsystem.sapm_celltemp(900, 2, atemp).set_index(atemp)
temps.plot()
plt.legend()
plt.xlabel('ambient temperature (deg C)')
plt.ylabel('temperature (deg C)')
"""
Explanation: Cell and module temperature as a function of ambient temperature.
End of explanation
"""
irrad = np.linspace(0,1000,101)
temps = pvsystem.sapm_celltemp(irrad, 2, 20).set_index(irrad)
temps.plot()
plt.legend()
plt.xlabel('incident irradiance (W/m**2)')
plt.ylabel('temperature (deg C)')
"""
Explanation: Cell and module temperature as a function of incident irradiance.
End of explanation
"""
models = ['open_rack_cell_glassback',
'roof_mount_cell_glassback',
'open_rack_cell_polymerback',
'insulated_back_polymerback',
'open_rack_polymer_thinfilm_steel',
'22x_concentrator_tracker']
temps = pd.DataFrame(index=['temp_cell','temp_module'])
for model in models:
temps[model] = pd.Series(pvsystem.sapm_celltemp(1000, 5, 20, model=model).iloc[0])
temps.T.plot(kind='bar') # try removing the transpose operation and replotting
plt.legend()
plt.ylabel('temperature (deg C)')
"""
Explanation: Cell and module temperature for different module and racking types.
End of explanation
"""
inverters = pvsystem.retrieve_sam('sandiainverter')
inverters
vdcs = pd.Series(np.linspace(0,50,51))
idcs = pd.Series(np.linspace(0,11,110))
pdcs = idcs * vdcs
pacs = pvsystem.snlinverter(vdcs, pdcs, inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'])
#pacs.plot()
plt.plot(pacs, pdcs)
plt.ylabel('ac power')
plt.xlabel('dc power')
"""
Explanation: snlinverter
End of explanation
"""
cec_modules = pvsystem.retrieve_sam('cecmod')
cec_modules
cecmodule = cec_modules.Example_Module
cecmodule
"""
Explanation: Need to put more effort into describing this function.
DC model
This example shows use of the Desoto module performance model and the Sandia Array Performance Model (SAPM). Both models require a set of parameter values which can be read from SAM databases for modules.
For the Desoto model, the database content is returned by supplying the keyword cecmod to pvsystem.retrieve_sam.
End of explanation
"""
sandia_modules = pvsystem.retrieve_sam(name='SandiaMod')
sandia_modules
sandia_module = sandia_modules.Canadian_Solar_CS5P_220M___2009_
sandia_module
"""
Explanation: The Sandia module database is read by the same function with the keyword SandiaMod.
End of explanation
"""
from pvlib import clearsky
from pvlib import irradiance
from pvlib import atmosphere
from pvlib.location import Location
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times_loc = pd.date_range(start=datetime.datetime(2014,4,1), end=datetime.datetime(2014,4,2), freq='30s', tz=tus.tz)
solpos = pvlib.solarposition.get_solarposition(times_loc, tus.latitude, tus.longitude)
dni_extra = pvlib.irradiance.extraradiation(times_loc)
airmass = pvlib.atmosphere.relativeairmass(solpos['apparent_zenith'])
pressure = pvlib.atmosphere.alt2pres(tus.altitude)
am_abs = pvlib.atmosphere.absoluteairmass(airmass, pressure)
cs = tus.get_clearsky(times_loc)
surface_tilt = tus.latitude
surface_azimuth = 180 # pointing south
aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth,
solpos['apparent_zenith'], solpos['azimuth'])
total_irrad = pvlib.irradiance.total_irrad(surface_tilt,
surface_azimuth,
solpos['apparent_zenith'],
solpos['azimuth'],
cs['dni'], cs['ghi'], cs['dhi'],
dni_extra=dni_extra,
model='haydavies')
"""
Explanation: Generate some irradiance data for modeling.
End of explanation
"""
module = sandia_module
# a sunny, calm, and hot day in the desert
temps = pvsystem.sapm_celltemp(total_irrad['poa_global'], 0, 30)
effective_irradiance = pvlib.pvsystem.sapm_effective_irradiance(
total_irrad['poa_direct'], total_irrad['poa_diffuse'],
am_abs, aoi, module)
sapm_1 = pvlib.pvsystem.sapm(effective_irradiance, temps['temp_cell'], module)
sapm_1.plot()
def plot_sapm(sapm_data, effective_irradiance):
    """
    Makes a nice figure with the SAPM data.
    Parameters
    ----------
    sapm_data : DataFrame
        The output of ``pvsystem.sapm``
    effective_irradiance : Series
        The effective irradiance used to produce ``sapm_data``; plotted
        on the x axis of the bottom row of panels.
    """
    # 2x3 grid: top row plots vs. time, bottom row vs. effective irradiance.
    fig, axes = plt.subplots(2, 3, figsize=(16,10), sharex=False, sharey=False, squeeze=False)
    plt.subplots_adjust(wspace=.2, hspace=.3)
    # Currents (columns named i_*) vs. time.
    ax = axes[0,0]
    sapm_data.filter(like='i_').plot(ax=ax)
    ax.set_ylabel('Current (A)')
    # Voltages (v_*) vs. time.
    ax = axes[0,1]
    sapm_data.filter(like='v_').plot(ax=ax)
    ax.set_ylabel('Voltage (V)')
    # Powers (p_*) vs. time.
    ax = axes[0,2]
    sapm_data.filter(like='p_').plot(ax=ax)
    ax.set_ylabel('Power (W)')
    # Currents vs. effective irradiance (one line per i_* column).
    ax = axes[1,0]
    [ax.plot(effective_irradiance, current, label=name) for name, current in
    sapm_data.filter(like='i_').iteritems()]
    ax.set_ylabel('Current (A)')
    ax.set_xlabel('Effective Irradiance')
    ax.legend(loc=2)
    # Voltages vs. effective irradiance.
    ax = axes[1,1]
    [ax.plot(effective_irradiance, voltage, label=name) for name, voltage in
    sapm_data.filter(like='v_').iteritems()]
    ax.set_ylabel('Voltage (V)')
    ax.set_xlabel('Effective Irradiance')
    ax.legend(loc=4)
    # Max-power point vs. effective irradiance.
    ax = axes[1,2]
    ax.plot(effective_irradiance, sapm_data['p_mp'], label='p_mp')
    ax.set_ylabel('Power (W)')
    ax.set_xlabel('Effective Irradiance')
    ax.legend(loc=2)
    # needed to show the time ticks
    for ax in axes.flatten():
        for tk in ax.get_xticklabels():
            tk.set_visible(True)
"""
Explanation: Now we can run the module parameters and the irradiance data through the SAPM functions.
End of explanation
"""
temps = pvsystem.sapm_celltemp(total_irrad['poa_global'], 10, 5)
sapm_2 = pvlib.pvsystem.sapm(effective_irradiance, temps['temp_cell'], module)
plot_sapm(sapm_2, effective_irradiance)
sapm_1['p_mp'].plot(label='30 C, 0 m/s')
sapm_2['p_mp'].plot(label=' 5 C, 10 m/s')
plt.legend()
plt.ylabel('Pmp')
plt.title('Comparison of a hot, calm day and a cold, windy day')
"""
Explanation: For comparison, here's the SAPM for a sunny, windy, cold version of the same day.
End of explanation
"""
import warnings
warnings.simplefilter('ignore', np.RankWarning)
def sapm_to_ivframe(sapm_row):
    """Collect the five characteristic SAPM points of one timestamp as
    (current, voltage) pairs, ordered by increasing voltage."""
    points = {'Isc': (sapm_row['i_sc'], 0),
              'Pmp': (sapm_row['i_mp'], sapm_row['v_mp']),
              'Ix': (sapm_row['i_x'], 0.5*sapm_row['v_oc']),
              'Ixx': (sapm_row['i_xx'], 0.5*(sapm_row['v_oc']+sapm_row['v_mp'])),
              'Voc': (0, sapm_row['v_oc'])}
    frame = pd.DataFrame(points, index=['current', 'voltage']).T
    return frame.sort_values(by='voltage')
def ivframe_to_ivcurve(ivframe, points=100):
    """Fit a degree-30 polynomial through the five SAPM points and
    sample it on an evenly spaced voltage grid from 0 to Voc."""
    coeffs = np.polyfit(ivframe['voltage'], ivframe['current'], 30)
    voltage_grid = np.linspace(0, ivframe.loc['Voc', 'voltage'], points)
    return voltage_grid, np.polyval(coeffs, voltage_grid)
# Draw the five SAPM points plus the interpolated IV curve for several
# morning-to-noon timestamps. The list is reversed -- presumably so the
# highest-irradiance (noon) curve is drawn/listed first; confirm if the
# legend order matters.
times = ['2014-04-01 07:00:00', '2014-04-01 08:00:00', '2014-04-01 09:00:00',
         '2014-04-01 10:00:00', '2014-04-01 11:00:00', '2014-04-01 12:00:00']
times.reverse()
fig, ax = plt.subplots(1, 1, figsize=(12,8))
for time in times:
    # Extract the 5 SAPM points at this timestamp...
    ivframe = sapm_to_ivframe(sapm_1.loc[time])
    # ...and interpolate a smooth IV curve through them.
    fit_voltages, fit_currents = ivframe_to_ivcurve(ivframe)
    ax.plot(fit_voltages, fit_currents, label=time)
    ax.plot(ivframe['voltage'], ivframe['current'], 'ko')  # the 5 raw points
ax.set_xlabel('Voltage (V)')
ax.set_ylabel('Current (A)')
ax.set_ylim(0, None)
ax.set_title('IV curves at multiple times')
ax.legend()
"""
Explanation: SAPM IV curves
The IV curve function only calculates the 5 points of the SAPM. We will add arbitrary points in a future release, but for now we just interpolate between the 5 SAPM points.
End of explanation
"""
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = (
pvsystem.calcparams_desoto(total_irrad['poa_global'],
temp_cell=temps['temp_cell'],
alpha_sc=cecmodule['alpha_sc'],
a_ref=cecmodule['a_ref'],
I_L_ref=cecmodule['I_L_ref'],
I_o_ref=cecmodule['I_o_ref'],
R_sh_ref=cecmodule['R_sh_ref'],
R_s=cecmodule['R_s']) )
photocurrent.plot()
plt.ylabel('Light current I_L (A)')
saturation_current.plot()
plt.ylabel('Saturation current I_0 (A)')
resistance_series
resistance_shunt.plot()
plt.ylabel('Shunt resistance (ohms)')
plt.ylim(0,100)
nNsVth.plot()
plt.ylabel('nNsVth')
"""
Explanation: desoto
The same weather data run through the Desoto model.
End of explanation
"""
single_diode_out = pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
single_diode_out['i_sc'].plot()
single_diode_out['v_oc'].plot()
single_diode_out['p_mp'].plot()
"""
Explanation: Single diode model
End of explanation
"""
|
sdpython/ensae_teaching_cs | _doc/notebooks/td1a_home/2020_tsp.ipynb | mit | from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
"""
Explanation: Algo - TSP - Traveling Salesman Problem
TSP, Traveling Salesman Problem ou Problème du Voyageur de Commerce est un problème classique. Il s'agit de trouver le plus court chemin passant par des villes en supposant qu'il existe une route entre chaque paire de villes.
End of explanation
"""
import numpy
import matplotlib.pyplot as plt
villes = numpy.random.rand(20, 2)
plt.plot(villes[:, 0], villes[:, 1], 'b-o')
plt.plot([villes[0, 0], villes[-1, 0]],
[villes[0, 1], villes[-1, 1]], 'b-o');
"""
Explanation: Enoncé
On part d'un ensemble de villes aléatoires.
End of explanation
"""
plt.plot(villes[:, 0], villes[:, 1], 'b-o')
plt.plot([villes[0, 0], villes[-1, 0]],
[villes[0, 1], villes[-1, 1]], 'b-o');
"""
Explanation: Q1 : choisir une permutation aléatoire des villes et calculer la distance du chemin qui les relie dans cet ordre
Q2 : tirer deux villes aléatoirement, les inverser, garder la permutation si elle améliore la distance
Q3 : choisir deux villes aléatoirement, permuter une des deux moitiés...
Q4 : tester toutes les permutations possibles... je plaisante...
Choisir les deux villes les plus proches, les relier, recommencer, puis... vous trouverez bien quelque chose pour finir.
Réponses
Q1
On redessine le parcours entre les villes.
End of explanation
"""
def distance_ville(v1, v2):
    """Euclidean distance between two cities given as coordinate arrays."""
    ecart = v1 - v2
    return numpy.sqrt(numpy.sum(ecart ** 2))
def distance_tour(villes, permutation):
    """Length of the closed tour visiting the cities in the given order,
    including the closing edge from the last city back to the first."""
    boucle = list(permutation) + [permutation[0]]
    total = 0.0
    for depart, arrivee in zip(boucle, boucle[1:]):
        total += numpy.sum((villes[depart] - villes[arrivee]) ** 2) ** 0.5
    return total
distance_tour(villes, list(range(villes.shape[0])))
"""
Explanation: La première étape consiste à calculer la distance d'un chemin passant par toutes les villes.
End of explanation
"""
def dessine_tour(villes, perm):
    # Draw the tour through the cities in the order given by `perm`,
    # closing the loop from the last city back to the first, and show
    # the total tour length in the title.
    fig, ax = plt.subplots(1, 1, figsize=(4, 4))
    ax.plot(villes[perm, 0], villes[perm, 1], 'b-o')
    # Closing edge between the first and last cities of the permutation.
    ax.plot([villes[perm[0], 0], villes[perm[-1], 0]],
            [villes[perm[0], 1], villes[perm[-1], 1]], 'b-o')
    ax.set_title("dist=%f" % distance_tour(villes, perm))
    return ax
perm = list(range(villes.shape[0]))
dessine_tour(villes, perm);
"""
Explanation: Ensuite, pour voir la solution, on insère le code qui permet de dessiner le chemin dans une fonction.
End of explanation
"""
def ameliore_tour(villes, perm=None):
    # Local search by random swaps: exchange two random cities and keep
    # the swap whenever it shortens the tour. Returns
    # (best distance, number of accepted swaps, best permutation).
    #
    # Copy perm so we never mutate the caller's list; when no
    # permutation is given, start from the identity permutation.
    perm = (perm.copy() if perm is not None
            else list(range(villes.shape[0])))
    # Current tour length.
    dist_min = distance_tour(villes, perm)
    # Initialisation.
    cont = True
    nb_perm, nb_iter = 0, 0
    # Keep going until no improvement was found in the last
    # len(perm) iterations.
    while cont or nb_iter < len(perm):
        nb_iter += 1
        # Pick two cities at random.
        # NOTE(review): with these bounds `a` never reaches the last two
        # indices and `b` never reaches the last index -- presumably
        # harmless for a closed tour, but worth confirming.
        a = numpy.random.randint(0, len(perm) - 2)
        b = numpy.random.randint(a + 1, len(perm) - 1)
        # Swap the two cities.
        perm[a], perm[b] = perm[b], perm[a]
        # Length of the modified tour.
        dist = distance_tour(villes, perm)
        # If it is shorter...
        if dist < dist_min:
            # ...keep the swap.
            dist_min = dist
            cont = True
            nb_perm += 1
            nb_iter = 0
        else:
            # ...otherwise undo the modification.
            perm[a], perm[b] = perm[b], perm[a]
            cont = False
    return dist_min, nb_perm, perm
dist, nb_perm, perm = ameliore_tour(villes)
print("nb perm", nb_perm)
dessine_tour(villes, perm);
"""
Explanation: Q2
On rédige l'algorithme.
End of explanation
"""
def ameliore_tour_renversement(villes, perm=None):
    # 2-opt-style local search: reverse a random segment of the tour and
    # keep the reversal whenever it shortens the tour (this removes
    # crossing edges). Returns (best distance, number of accepted
    # reversals, best permutation).
    perm = (perm.copy() if perm is not None
            else list(range(villes.shape[0])))
    dist_min = distance_tour(villes, perm)
    cont = True
    nb_perm, nb_iter = 0, 0
    # Stop after len(perm)**2 iterations without improvement.
    while cont or nb_iter < len(perm) ** 2:
        nb_iter += 1
        # This is the part that changes: keep a copy of the permutation
        # so a rejected reversal can be undone by restoring it.
        p0 = perm.copy()
        a = numpy.random.randint(0, len(perm) - 2)
        b = numpy.random.randint(a + 1, len(perm) - 1)
        # Reverse the segment perm[a..b] of the permutation.
        if a == 0:
            # Special case: a reversed slice ending at index 0 would be
            # empty, so handle the first element separately.
            perm[0:b] = perm[b:0:-1]
            perm[b] = p0[0]
        else:
            perm[a:b+1] = perm[b:a-1:-1]
        # The rest is nearly identical to ameliore_tour.
        dist = distance_tour(villes, perm)
        if dist < dist_min:
            dist_min = dist
            cont = True
            nb_perm += 1
            nb_iter = 0
        else:
            # Restore the saved copy; simpler than reversing
            # the segment back.
            perm = p0
            cont = False
    return dist_min, nb_perm, perm
dist, nb_perm, perm = ameliore_tour_renversement(villes)
print("nb perm", nb_perm)
dessine_tour(villes, perm);
"""
Explanation: C'est pas extraordinaire.
Q3
Lorsque deux segments du chemin se croisent, il est possible de construire un autre chemin plus court en retournant une partie du chemin.
End of explanation
"""
from scipy.spatial.distance import cdist
def build_permutation(villes):
    """Greedy nearest-neighbour initialisation: seed the tour with the
    two closest cities, then repeatedly append the closest city that has
    not been visited yet."""
    dists = cdist(villes, villes)
    # Sentinel used to mask forbidden moves (self-loops, visited cities).
    sentinel = dists.ravel().max()
    for k in range(villes.shape[0]):
        dists[k, k] = sentinel
    # The closest pair of distinct cities starts the tour.
    nearest = numpy.argmin(dists, axis=1)
    best = min((dists[k, nearest[k]], k, nearest[k])
               for k in range(villes.shape[0]))
    perm = list(best[1:])
    dists[perm[0], :] = sentinel
    dists[:, perm[0]] = sentinel
    while len(perm) < villes.shape[0]:
        courante = perm[-1]
        # Nearest still-unmasked (unvisited) city from the current one.
        perm.append(numpy.argmin(dists[courante]))
        # Mask the city we just left so it cannot be chosen again.
        dists[perm[-2], :] = sentinel
        dists[:, perm[-2]] = sentinel
    return perm
perm = build_permutation(villes)
dessine_tour(villes, perm);
"""
Explanation: Il n'y a plus de croisements, ce qui est l'effet recherché.
Q4
On pourrait combiner ces deux fonctions pour améliorer l'algorithme qui resterait sans doute très long pour un grand nombre de villes. On pourrait initialiser l'algorithme avec une permutation moins aléatoire pour accélérer la convergence. Pour ce faire, on regroupe les deux villes les plus proches, puis de proche en proche...
End of explanation
"""
dist, nb_perm, perm = ameliore_tour_renversement(villes, perm)
print("nb perm", nb_perm)
dessine_tour(villes, perm);
"""
Explanation: Pas si mal... Il reste un croisement. On applique la fonction de la question précédente.
End of explanation
"""
|
taylort7147/udacity-projects | titanic_survival_exploration/Titanic_Survival_Exploration.ipynb | mit | import numpy as np
import pandas as pd
# RMS Titanic data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
"""
Explanation: Machine Learning Engineer Nanodegree
Introduction and Foundations
Project 0: Titanic Survival Exploration
In 1912, the ship RMS Titanic struck an iceberg on its maiden voyage and sank, resulting in the deaths of most of its passengers and crew. In this introductory project, we will explore a subset of the RMS Titanic passenger manifest to determine which features best predict whether someone survived or did not survive. To complete this project, you will need to implement several conditional predictions and answer the questions below. Your project submission will be evaluated based on the completion of the code and your responses to the questions.
Tip: Quoted sections like this will provide helpful instructions on how to navigate and use an iPython notebook.
Getting Started
To begin working with the RMS Titanic passenger data, we'll first need to import the functionality we need, and load our data into a pandas DataFrame.
Run the code cell below to load our data and display the first few entries (passengers) for examination using the .head() function.
Tip: You can run a code cell by clicking on the cell and using the keyboard shortcut Shift + Enter or Shift + Return. Alternatively, a code cell can be executed using the Play button in the hotbar after selecting it. Markdown cells (text cells like this one) can be edited by double-clicking, and saved using these same shortcuts. Markdown allows you to write easy-to-read plain text that can be converted to HTML.
End of explanation
"""
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
"""
Explanation: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship:
- Survived: Outcome of survival (0 = No; 1 = Yes)
- Pclass: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)
- Name: Name of passenger
- Sex: Sex of the passenger
- Age: Age of the passenger (Some entries contain NaN)
- SibSp: Number of siblings and spouses of the passenger aboard
- Parch: Number of parents and children of the passenger aboard
- Ticket: Ticket number of the passenger
- Fare: Fare paid by the passenger
- Cabin Cabin number of the passenger (Some entries contain NaN)
- Embarked: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)
Since we're interested in the outcome of survival for each passenger or crew member, we can remove the Survived feature from this dataset and store it as its own separate variable outcomes. We will use these outcomes as our prediction targets.
Run the code cell below to remove Survived as a feature of the dataset and store it in outcomes.
End of explanation
"""
def accuracy_score(truth, pred):
    """ Returns accuracy score for input truth and predictions. """
    # Guard clause: the two series must be aligned in length.
    if len(truth) != len(pred):
        return "Number of predictions does not match number of outcomes!"
    # Fraction of matching entries, expressed as a percentage.
    pct = (truth == pred).mean()*100
    return "Predictions have an accuracy of {:.2f}%.".format(pct)
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions)
"""
Explanation: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcome[i].
To measure the performance of our predictions, we need a metric to score our predictions against the true outcomes of survival. Since we are interested in how accurate our predictions are, we will calculate the proportion of passengers where our prediction of their survival is correct. Run the code cell below to create our accuracy_score function and test a prediction on the first five passengers.
Think: Out of the first five passengers, if we predict that all of them survived, what would you expect the accuracy of our predictions to be?
End of explanation
"""
def predictions_0(data):
    """ Model with no features. Always predicts a passenger did not survive. """
    # One "did not survive" (0) prediction per row of the input frame.
    predictions = [0 for _ in range(len(data))]
    return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
"""
Explanation: Tip: If you save an iPython Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the code blocks from your previous session to reestablish variables and functions before picking up where you last left off.
Making Predictions
If we were asked to make a prediction about any passenger aboard the RMS Titanic whom we knew nothing about, then the best prediction we could make would be that they did not survive. This is because we can assume that a majority of the passengers (more than 50%) did not survive the ship sinking.
The predictions_0 function below will always predict that a passenger did not survive.
End of explanation
"""
print accuracy_score(outcomes, predictions)
"""
Explanation: Question 1
Using the RMS Titanic data, how accurate would a prediction be that none of the passengers survived?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
"""
survival_stats(data, outcomes, 'Sex')
"""
Explanation: Answer: 61.62%
Let's take a look at whether the feature Sex has any indication of survival rates among passengers using the survival_stats function. This function is defined in the titanic_visualizations.py Python script included with this project. The first two parameters passed to the function are the RMS Titanic data and passenger survival outcomes, respectively. The third parameter indicates which feature we want to plot survival statistics across.
Run the code cell below to plot the survival outcomes of passengers based on their sex.
End of explanation
"""
def predictions_1(data):
    """Single-feature model: predict survival if and only if the passenger
    is female.

    Returns a pandas Series of 0/1 integers (1 = survived), so the output
    encoding matches predictions_0 and the survival labels, instead of the
    raw booleans that the comparison would otherwise produce.
    """
    predictions = [int(passenger["Sex"] == "female")
                   for _, passenger in data.iterrows()]
    return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
"""
Explanation: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction: If a passenger was female, then we will predict that they survived. Otherwise, we will predict the passenger did not survive.
Fill in the missing code below so that the function will make this prediction.
Hint: You can access the values of each feature for a passenger like a dictionary. For example, passenger['Sex'] is the sex of the passenger.
End of explanation
"""
print accuracy_score(outcomes, predictions)
"""
Explanation: Question 2
How accurate would a prediction be that all female passengers survived and the remaining passengers did not survive?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
"""
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
"""
Explanation: Answer: 78.68%
Using just the Sex feature for each passenger, we are able to increase the accuracy of our predictions by a significant margin. Now, let's consider using an additional feature to see if we can further improve our predictions. For example, consider all of the male passengers aboard the RMS Titanic: Can we find a subset of those passengers that had a higher rate of survival? Let's start by looking at the Age of each male, by again using the survival_stats function. This time, we'll use a fourth parameter to filter out the data so that only passengers with the Sex 'male' will be included.
Run the code cell below to plot the survival outcomes of male passengers based on their age.
End of explanation
"""
def predictions_2(data):
    """Two-feature model: predict survival for every female passenger and
    for every male passenger younger than 10 years old."""
    predictions = []
    for _, row in data.iterrows():
        # Single boolean expression in place of the if/elif ladder;
        # a missing (NaN) age compares False against 10, as before.
        survived = bool(row["Sex"] == "female" or row["Age"] < 10)
        predictions.append(survived)
    return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
"""
Explanation: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction: If a passenger was female, then we will predict they survive. If a passenger was male and younger than 10, then we will also predict they survive. Otherwise, we will predict they do not survive.
Fill in the missing code below so that the function will make this prediction.
Hint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_1.
End of explanation
"""
print accuracy_score(outcomes, predictions)
"""
Explanation: Question 3
How accurate would a prediction be that all female passengers and all male passengers younger than 10 survived?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
"""
survival_stats(data, outcomes, "Age", ["Sex == 'female'", "Pclass == 3"])
"""
Explanation: Answer: 79.35%
Adding the feature Age as a condition in conjunction with Sex improves the accuracy by a small margin more than with simply using the feature Sex alone. Now it's your turn: Find a series of features and conditions to split the data on to obtain an outcome prediction accuracy of at least 80%. This may require multiple features and multiple levels of conditional statements to succeed. You can use the same feature multiple times with different conditions.
Pclass, Sex, Age, SibSp, and Parch are some suggested features to try.
Use the survival_stats function below to to examine various survival statistics.
Hint: To use mulitple filter conditions, put each condition in the list passed as the last argument. Example: ["Sex == 'male'", "Age < 18"]
End of explanation
"""
def predictions_3(data):
    """ Model with multiple features. Makes a prediction with an accuracy of at least 80%.

    Rules:
      - Females in 1st/2nd class survive; 3rd-class females survive only
        if they did not embark at Southampton ('S').
      - Males survive only if younger than 10 with at most 2 siblings/spouses.

    Side effect: prints the per-sex mis-prediction rate, computed against
    the global `outcomes` Series defined earlier in the notebook.
    """
    male = 0
    male_incorrect = 0
    female = 0
    female_incorrect = 0
    predictions = []
    # enumerate() replaces the hand-maintained index counter so that
    # outcomes[i] stays aligned with the row currently being scored.
    for i, (_, passenger) in enumerate(data.iterrows()):
        survival_status = False
        # Female
        if passenger["Sex"] == "female":
            # First or second class females survive.
            if passenger["Pclass"] in (1, 2):
                survival_status = True
            # Third class: survive only if not embarked at Southampton.
            elif passenger["Embarked"] != "S":
                survival_status = True
            # Accumulate mis-predictions
            female += 1
            if survival_status != outcomes[i]:
                female_incorrect += 1
        # Male
        else:
            # Young boys travelling with at most 2 siblings/spouses survive.
            if passenger["Age"] < 10 and passenger["SibSp"] <= 2:
                survival_status = True
            # Accumulate mis-predictions
            male += 1
            if survival_status != outcomes[i]:
                male_incorrect += 1
        predictions.append(survival_status)
    # Print accumulated mis-prediction rates; guard each denominator so an
    # all-female or all-male sample no longer raises ZeroDivisionError.
    print("Mis-prediction rates:")
    if male:
        print("Male: {}/{} = {}".format(male_incorrect, male, 1.0 * male_incorrect/male))
    if female:
        print("Female: {}/{} = {}".format(female_incorrect, female, 1.0 * female_incorrect/female))
    # Return our predictions
    return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
"""
Explanation: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Make sure to keep track of the various features and conditions you tried before arriving at your final prediction model.
Hint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_2.
End of explanation
"""
print accuracy_score(outcomes, predictions)
"""
Explanation: Question 4
Describe the steps you took to implement the final prediction model so that it got an accuracy of at least 80%. What features did you look at? Were certain features more informative than others? Which conditions did you use to split the survival outcomes in the data? How accurate are your predictions?
Hint: Run the code cell below to see the accuracy of your predictions.
End of explanation
"""
|
Kaggle/learntools | notebooks/intro_to_programming/raw/tut3.ipynb | apache-2.0 | x = 14
print(x)
print(type(x))
"""
Explanation: Introduction
Whenever you create a variable in Python, it has a value with a corresponding data type. There are many different data types, such as integers, floats, booleans, and strings, all of which we'll cover in this lesson. (This is just a small subset of the available data types -- there are also dictionaries, sets, lists, tuples, and much more.)
Data types are important, because they determine what kinds of actions you can do with them. For instance, you can divide two floats, but you cannot divide two strings. For instance, 12.0/2.0 makes sense, but "cat"/"dog" does not.
To avoid errors, we need to make sure that the actions match the data types that we have.
Integers
Integers are numbers without any fractional part and can be positive (1, 2, 3, ...), negative (-1, -2, -3, ...), or zero (0).
In the code cell below, we set a variable x to an integer. We then verify the data type with type(), and need only pass the variable name into the parentheses.
End of explanation
"""
nearly_pi = 3.141592653589793238462643383279502884197169399375105820974944
print(nearly_pi)
print(type(nearly_pi))
"""
Explanation: In the output above, <class 'int'> refers to the integer data type.
Floats
Floats are numbers with fractional parts. They can have many numbers after decimal.
End of explanation
"""
almost_pi = 22/7
print(almost_pi)
print(type(almost_pi))
"""
Explanation: We can also specify a float with a fraction.
End of explanation
"""
# Round to 5 decimal places
rounded_pi = round(almost_pi, 5)
print(rounded_pi)
print(type(rounded_pi))
"""
Explanation: One function that is particularly useful for fractions is the round() function. It lets you round a number to a specified number of decimal places.
End of explanation
"""
y_float = 1.
print(y_float)
print(type(y_float))
"""
Explanation: Whenever you write a number with a decimal point, Python recognizes it as a float data type.
For instance, 1. (or 1.0, 1.00, etc) will be recognized as a float. This is the case, even though these numbers technically have no fractional part!
End of explanation
"""
z_one = True
print(z_one)
print(type(z_one))
"""
Explanation: Booleans
Booleans represent one of two values: True or False. In the code cell below, z_one is set to a boolean with value True.
End of explanation
"""
z_two = False
print(z_two)
print(type(z_two))
"""
Explanation: Next, z_two is set to a boolean with value False.
End of explanation
"""
z_three = (1 < 2)
print(z_three)
print(type(z_three))
"""
Explanation: Booleans are used to represent the truth value of an expression. Since 1 < 2 is a true statement, z_three takes on a value of True.
End of explanation
"""
z_four = (5 < 3)
print(z_four)
print(type(z_four))
"""
Explanation: Similarly, since 5 < 3 is a false statement, z_four takes on a value of False.
End of explanation
"""
z_five = not z_four
print(z_five)
print(type(z_five))
"""
Explanation: We can switch the value of a boolean by using not. So, not True is equivalent to False, and not False becomes True.
End of explanation
"""
w = "Hello, Python!"
print(w)
print(type(w))
"""
Explanation: Booleans will be important in the next lesson, when you learn about conditions and conditional statements.
Strings
The string data type is a collection of characters (like alphabet letters, punctuation, numerical digits, or symbols) contained in quotation marks. Strings are commonly used to represent text.
End of explanation
"""
print(len(w))
"""
Explanation: You can get the length of a string with len(). "Hello, Python!" has length 14, because it has 14 characters, including the space, comma, and exclamation mark. Note that the quotation marks are not included when calculating the length.
End of explanation
"""
shortest_string = ""
print(type(shortest_string))
print(len(shortest_string))
"""
Explanation: One special type of string is the empty string, which has length zero.
End of explanation
"""
my_number = "1.12321"
print(my_number)
print(type(my_number))
"""
Explanation: If you put a number in quotation marks, it has a string data type.
End of explanation
"""
also_my_number = float(my_number)
print(also_my_number)
print(type(also_my_number))
"""
Explanation: If we have a string that is convertible to a float, we can use float().
This won't always work! For instance, we can convert "10.43430" and "3" to floats, but we cannot convert "Hello, Python!" to a float.
End of explanation
"""
new_string = "abc" + "def"
print(new_string)
print(type(new_string))
"""
Explanation: Just like you can add two numbers (floats or integers), you can also add two strings. It results in a longer string that combines the two original strings by concatenating them.
End of explanation
"""
newest_string = "abc" * 3
print(newest_string)
print(type(newest_string))
"""
Explanation: Note that it's not possible to do subtraction or division with two strings. You also can't multiply two strings, but you can multiply a string by an integer. This again results in a string that's just the original string concatenated with itself a specified number of times.
End of explanation
"""
will_not_work = "abc" * 3.
"""
Explanation: Note that you cannot multiply a string by a float! Trying to do so will return an error.
End of explanation
"""
|
NAU-CFL/Python_Learning_Source | reference_notebooks/Notes-05.ipynb | mit | bruce = 5
print(bruce)
bruce = 7
print(bruce)
"""
Explanation: Iteration
Multiple Assignments
It's legal to assign a new value to an existing variable. A new assignment makes an existing variable refer to a new value (and stop referring to the old value).
End of explanation
"""
a = 5
print("a is: ", a)
b = a # a and b are now equal
print("b is: ", b)
a = 3 # a and b are no longer equal
print("a is: ", a)
"""
Explanation: With multiple assignment it is especially important to distinguish between an assignment operation and a statement of equality. Because Python uses the equal sign (=) for assignment, it is tempting to interpret a statement like a = b as a statement of equality. It is not!
First, equality is a symmetric relation and assignment is not. For example, in mathematics, if a = 7 then 7 = a. But in Python, the statement a = 7 is legal and 7 = a is not.
Furthermore, in mathematics, a statement of equality is either true or false, for all time. If a = b now, then a will always equal b. In Python, an assignment statement can make two variables equal, but they don’t have to stay that way:
End of explanation
"""
x = x + 1
"""
Explanation: Updating Variables
One of the most common forms of multiple assignment is an update, where the new value of the variable depends on the old.
Python
x = x + 1
This means “get the current value of x, add one, and then update x with the new value.”
If you try to update a variable that doesn’t exist, you get an error, because Python evaluates the right side before it assigns a value to x:
End of explanation
"""
def countdown(n):
    """Print the integers n, n-1, ..., 1, then 'Blastoff!'."""
    # Counting down via range(); when n <= 0 the range is empty and
    # only 'Blastoff!' is printed, matching the while-loop version.
    for k in range(n, 0, -1):
        print(k)
    print('Blastoff!')
countdown(10)
"""
Explanation: Before you can update a variable, you have to initialize it, usually with a simple assignment:
Python
x = 0
x = x + 1
Updating a variable by adding 1 is called an increment; subtracting 1 is called a decrement.
The while Statements
Computers are often used to automate repetitive tasks. Repeating identical or similar tasks without making errors is something that computers do well and people do poorly.
We have seen recursion to perform repetition, which is also called iteration. Because iteration is so common, Python provides several language features to make it easier, like for loops which we will cover later.
Another is the while statement. Here is a version of countdown that uses a while statement:
End of explanation
"""
while True:
line = input('> ')
if line == 'done':
break
print(line)
print('Done!')
"""
Explanation: You can almost read the while statement as if it were English. It means, “While n is greater than 0, display the value of n and then reduce the value of n by 1. When you get to 0, display the word Blastoff!”
Here is the flow of execution for a while statement:
Evaluate the condition, yielding True or False.
If the condition is false, exit the while statement and continue execution at the next statement.
If the condition is true, execute the body and then go back to step 1.
This type of flow is called a loop because the third step loops back around to the top.
break
Sometimes you don’t know it’s time to end a loop until you get half way through the body. In that case you can use the break statement to jump out of the loop.
For example, suppose you want to take input from the user until they type done. You could write:
End of explanation
"""
a = 4
x = 3
y = (x + a/x)/2
print(y)
"""
Explanation: The loop condition is True, which is always true, so the loop runs until it hits the break statement.
Each time through, it prompts the user with an angle bracket. If the user types done, the break statement exits the loop. Otherwise the program echoes whatever the user types and goes back to the top of the loop.
This way of writing while loops is common because you can check the condition anywhere in the loop (not just at the top) and you can express the stop condition affirmatively (“stop when this happens”) rather than negatively (“keep going until that happens.”).
Square Roots
Loops are often used in programs that compute numerical results by starting with an approximate answer and iteratively improving it.
For example, one way of computing square roots is Newton’s method. Suppose that you want to know the square root of a. If you start with almost any estimate, x, you can compute a better estimate with the following formula:
$$
y = \frac{x + a/x}{2}
$$
For example, if a is 4 and x is 3:
End of explanation
"""
x = y
y = (x + a/x)/2
print(y)
"""
Explanation: Which is closer to the correct answer $\sqrt{4} = 2$. If we repeat the process with the new estimate, it gets even closer:
End of explanation
"""
x = y
y = (x + a/x)/2
print(y)
x = y
y = (x + a/x)/2
print(y)
"""
Explanation: After a few more updates, the estimate is almost exact:
End of explanation
"""
a = 4
x = 3
while True:
print(x)
y = (x + a/x) / 2
if y == x:
break
x = y
"""
Explanation: In general we don’t know ahead of time how many steps it takes to get to the right answer, but we know when we get there because the estimate stops changing.
When y == x, we can stop. Here is a loop that starts with an initial estimate, x, and improves it until it stops changing:
End of explanation
"""
a = 2
x = 0.1
epsilon = 0.0000001
while True:
print(x)
y = (x + a/x) / 2
if abs(y-x) < epsilon:
break
x = y
"""
Explanation: For most values of a this works fine, but in general it is dangerous to test float equality. Floating-point values are only approximately right: most rational numbers, like 1/3, and irrational numbers, like $\sqrt{2}$ can’t be represented exactly with a float.
Rather than checking whether x and y are exactly equal, it is safer to use the built-in function abs to compute the absolute value, or magnitude, of the difference between them:
End of explanation
"""
|
google/starthinker | colabs/mapping.ipynb | apache-2.0 | !pip install git+https://github.com/google/starthinker
"""
Explanation: Column Mapping
Use sheet to define keyword to column mappings.
License
Copyright 2020 Google LLC,
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Disclaimer
This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.
This code generated (see starthinker/scripts for possible source):
- Command: "python starthinker_ui/manage.py colab"
- Command: "python starthinker/tools/colab.py [JSON RECIPE]"
1. Install Dependencies
First install the libraries needed to execute recipes, this only needs to be done once, then click play.
End of explanation
"""
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
"""
Explanation: 2. Get Cloud Project ID
To run this recipe requires a Google Cloud Project, this only needs to be done once, then click play.
End of explanation
"""
CLIENT_CREDENTIALS = 'PASTE CLIENT CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
"""
Explanation: 3. Get Client Credentials
To read and write to various endpoints requires downloading client credentials, this only needs to be done once, then click play.
End of explanation
"""
FIELDS = {
'auth_read': 'user', # Credentials used for reading data.
'sheet': '',
'tab': '',
'in_dataset': '',
'in_table': '',
'out_dataset': '',
'out_view': '',
}
print("Parameters Set To: %s" % FIELDS)
"""
Explanation: 4. Enter Column Mapping Parameters
For the sheet, provide the full URL.
A tab called <strong>Mapping</strong> will be created.
Follow the instructions in the tab to complete the mapping.
The in table should have the columns you want to map.
The out view will have the new columns created in the mapping.
Modify the values below for your use case, can be done multiple times, then click play.
End of explanation
"""
from starthinker.util.configuration import Configuration
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'mapping': {
'auth': 'user',
'sheet': {'field': {'name': 'sheet', 'kind': 'string', 'order': 1, 'default': ''}},
'tab': {'field': {'name': 'tab', 'kind': 'string', 'order': 2, 'default': ''}},
'in': {
'dataset': {'field': {'name': 'in_dataset', 'kind': 'string', 'order': 3, 'default': ''}},
'table': {'field': {'name': 'in_table', 'kind': 'string', 'order': 4, 'default': ''}}
},
'out': {
'dataset': {'field': {'name': 'out_dataset', 'kind': 'string', 'order': 7, 'default': ''}},
'view': {'field': {'name': 'out_view', 'kind': 'string', 'order': 8, 'default': ''}}
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)
"""
Explanation: 5. Execute Column Mapping
This does NOT need to be modified unless you are changing the recipe, click play.
End of explanation
"""
|
mne-tools/mne-tools.github.io | dev/_downloads/775a4c9edcb81275d5a07fdad54343dc/channel_epochs_image.ipynb | bsd-3-clause | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
"""
Explanation: Visualize channel over epochs as an image
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
Two images are produced, one with a good channel and one with a channel
that does not show any evoked field.
It is also demonstrated how to reorder the epochs using a 1D spectral
embedding as described in :footcite:GramfortEtAl2010.
End of explanation
"""
# Paths into the MNE sample dataset (filtered 0-40 Hz audio/visual run).
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
# Event of interest and the epoch window in seconds relative to each event.
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# Create epochs, here for gradiometers + EOG only for simplicity
# Baseline-correct on the pre-stimulus interval (None, 0) and drop trials
# exceeding the gradiometer / EOG peak-to-peak rejection thresholds.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=('grad', 'eog'), baseline=(None, 0), preload=True,
                    reject=dict(grad=4000e-13, eog=150e-6))
"""
Explanation: Set parameters
End of explanation
"""
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.manifold import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
    """Return an epoch ordering derived from a 1-D spectral embedding of
    each epoch's 0-350 ms post-stimulus segment."""
    window = (times > 0.0) & (times < 0.350)
    segment = data[:, window]
    # L2-normalize each epoch so the RBF kernel compares shapes, not amplitudes.
    segment /= np.linalg.norm(segment, axis=1)[:, np.newaxis]
    embedding = spectral_embedding(rbf_kernel(segment, gamma=1.),
                                   n_components=1, random_state=0)
    return np.argsort(embedding.ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
"""
Explanation: Show event-related fields images
End of explanation
"""
|
bmorris3/salter | stats_vis.ipynb | mit | [n for n in table.colnames if n.startswith('ks')]
# p-values of a t-test comparing the mean out-of-transit flux before vs.
# after mid-transit, read from the precomputed statistics table.
p = table['ttest:out_of_transit&before_midtransit-vs-out_of_transit&after_midtransit']
# Threshold is applied on log(p): log(p) > -1 corresponds to p > ~0.37.
# NOTE(review): the name says "poorly normalized", but this selects *large*
# p-values and is later used as a keep-mask ("stars left after cuts") —
# confirm the intended polarity of the cut.
poorly_normalized_oot_threshold = -1
mask_poorly_normalized_oot = np.log(p) > poorly_normalized_oot_threshold
# Histogram of log(p) with NaNs dropped; the cut is marked in red.
plt.hist(np.log(p[~np.isnan(p)]))
plt.axvline(poorly_normalized_oot_threshold, color='r')
plt.ylabel('freq')
plt.xlabel('log( Ttest(before-transit, after-transit) )')
plt.show()
"""
Explanation: If the mean flux before transit is significantly different from the mean flux after transit, mask those results.
End of explanation
"""
# p-values of a KS test comparing the *distribution* of out-of-transit
# fluxes before vs. after mid-transit.
p = table['ks:out_of_transit&before_midtransit-vs-out_of_transit&after_midtransit']
# Cut applied on log(p): log(p) > -1.5 corresponds to p > ~0.22.
mask_different_rms_before_vs_after_thresh = -1.5
mask_different_rms_before_vs_after = np.log(p) > mask_different_rms_before_vs_after_thresh
# Histogram of log(p) with NaNs dropped; the cut is marked in red.
plt.hist(np.log(p[~np.isnan(p)]))
plt.axvline(mask_different_rms_before_vs_after_thresh, color='r')
plt.ylabel('freq')
plt.xlabel('log( KS(before-transit, after-transit) )')
plt.show()

# Combined keep-mask used below when splitting the sample into
# close-in/large vs. far-out/small planets.
# NOTE(review): '|' keeps stars passing EITHER cut — confirm that '&'
# (passing both cuts) was not intended.
combined_mask = mask_poorly_normalized_oot | mask_different_rms_before_vs_after

print("stars left after cuts:", np.count_nonzero(table['kepid'][combined_mask]))
# In-transit vs. out-of-transit KS statistic plotted against impact parameter.
ks_in_out = table['ks:in_transit-vs-out_of_transit']
b = table['B']
thresh = 0.001
# Systems whose in-transit flux distribution differs notably from out of transit.
mask_notable_intransit = ks_in_out < thresh

plt.scatter(np.log(ks_in_out), b)
plt.axvline(np.log(thresh), color='r')

# First-half vs. second-half in-transit comparison: require BOTH the KS and
# Anderson-Darling tests to flag an asymmetric transit.
ks_in_in = table['ks:in_transit&before_midtransit-vs-in_transit&after_midtransit']
anderson_in_in = table['anderson:in_transit&before_midtransit-vs-in_transit&after_midtransit']
b = table['B']
thresh = 0.05
mask_asymmetric_in = (ks_in_in < thresh) & (anderson_in_in < thresh)
print(table['kepid'][mask_asymmetric_in])

plt.scatter(np.log(ks_in_in), b)  # fixed: 'rb' was an undefined name (NameError)
plt.axvline(np.log(thresh), color='r')
large_planets = table['R'].data > 0.1
close_in_planets = table['PER'] < 10
close_in_large_planets = (large_planets & close_in_planets) & combined_mask
far_out_small_planets = np.logical_not(close_in_large_planets) & combined_mask
np.count_nonzero(close_in_large_planets.data), np.count_nonzero(far_out_small_planets)
plt.hist(np.log(table['ks:in_transit-vs-out_of_transit'])[close_in_large_planets],
label='close in/large', alpha=0.4, normed=True)
plt.hist(np.log(table['ks:in_transit-vs-out_of_transit'])[far_out_small_planets],
label='far out/small', alpha=0.4, normed=True)
plt.legend()
plt.xlabel('log( KS(in vs. out) )')
plt.ylabel('Fraction of stars')
plt.title("Total activity")
plt.show()
"""
Explanation: If the distribution of fluxs before transit is significantly different from the distribution of fluxs after transit, mask those results.
End of explanation
"""
plt.hist(np.log(table['ks:in_transit&before_midtransit-vs-in_transit&after_midtransit'])[close_in_large_planets],
label='close in/large', alpha=0.4, normed=True)
plt.hist(np.log(table['ks:in_transit&before_midtransit-vs-in_transit&after_midtransit'])[far_out_small_planets],
label='far out/small', alpha=0.4, normed=True)
plt.legend()
plt.xlabel('log( KS(in-transit (first half) vs. in-transit (second half)) )')
plt.ylabel('Fraction of stars')
plt.title("Residual asymmetry")
plt.show()
"""
Explanation: It seems that close-in, large exoplanets orbit more active stars (with larger in-transit RMS) than far out/small planets
End of explanation
"""
plt.loglog(table['ks:in_transit-vs-out_of_transit'],
table['PER'], '.')
plt.xlabel('transit depth scatter: log(ks)')
plt.ylabel('period [d]')
plt.loglog(table['PER'][close_in_large_planets],
table['ks:in_transit-vs-out_of_transit'][close_in_large_planets], 'k.', label='close in & large')
plt.loglog(table['PER'][far_out_small_planets],
table['ks:in_transit-vs-out_of_transit'][far_out_small_planets], 'r.', label='far out | small')
plt.legend()
plt.ylabel('transit depth scatter: log(ks)')
plt.xlabel('period [d]')
ax = plt.gca()
ax.invert_yaxis()
"""
Explanation: Transit residuals are more asymmetric for far-out, small exoplanets.
End of explanation
"""
plt.semilogx(table['ks:in_transit-vs-out_of_transit'][close_in_large_planets],
table['B'][close_in_large_planets], 'k.', label='close in/large')
plt.semilogx(table['ks:in_transit-vs-out_of_transit'][far_out_small_planets],
table['B'][far_out_small_planets], 'r.', label='far out/small')
plt.legend()
ax = plt.gca()
ax.set_xlabel('transit depth scatter: log(ks)')
ax.set_ylabel('impact parameter $b$')
ax2 = ax.twinx()
y2 = 1 - np.linspace(0, 1, 5)
y2labels = np.degrees(np.arccos(y2))[::-1]
ax2.set_yticks(y2)
ax2.set_yticklabels([int(round(i)) for i in y2labels])
#ax2.set_ylim([0, 90])
ax2.set_ylabel('abs( latitude )')
def b_to_latitude_deg(b):
    """Map impact parameter b to |stellar latitude| in degrees, assuming the
    transit chord is aligned with the stellar equator."""
    colatitude_deg = np.rad2deg(np.arccos(b))
    return 90 - colatitude_deg
abs_latitude = b_to_latitude_deg(table['B'])
plt.semilogx(table['ks:in_transit-vs-out_of_transit'][close_in_large_planets],
abs_latitude[close_in_large_planets], 'k.', label='close in/large')
plt.semilogx(table['ks:in_transit-vs-out_of_transit'][far_out_small_planets],
abs_latitude[far_out_small_planets], 'r.', label='far out/small')
plt.legend()
ax = plt.gca()
ax.set_xlabel('in-transit asymmetry: log(ks)')
ax.set_ylabel('stellar latitude (assume aligned)')
from scipy.stats import binned_statistic
bs = binned_statistic(abs_latitude[far_out_small_planets],
np.log(table['ks:in_transit-vs-out_of_transit'][far_out_small_planets]),
statistic='median', bins=10)
bincenter = 0.5 * (bs.bin_edges[:-1] + bs.bin_edges[1:])
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].plot(abs_latitude[far_out_small_planets],
np.log(table['ks:in_transit-vs-out_of_transit'][far_out_small_planets]),
'k.', label='far out/small')
ax[0].plot(bincenter, bs.statistic, label='median')
ax[0].invert_yaxis()
ax[0].set_ylabel('transit depth scatter: log(ks)')
ax[0].set_xlabel('stellar latitude (assume aligned)')
bs = binned_statistic(abs_latitude[close_in_large_planets],
np.log(table['ks:in_transit-vs-out_of_transit'][close_in_large_planets]),
statistic='median', bins=10)
bincenter = 0.5 * (bs.bin_edges[:-1] + bs.bin_edges[1:])
ax[1].plot(abs_latitude[close_in_large_planets],
np.log(table['ks:in_transit-vs-out_of_transit'][close_in_large_planets]),
'k.', label='far out/small')
ax[1].plot(bincenter, bs.statistic, label='median')
ax[1].invert_yaxis()
ax[1].set_ylabel('transit depth scatter: log(ks)')
ax[1].set_xlabel('stellar latitude (assume aligned)')
ax[0].set_title('Small | far out')
ax[1].set_title('large & close in')
from scipy.stats import binned_statistic
bs = binned_statistic(abs_latitude[far_out_small_planets],
np.log(table['ks:in_transit-vs-out_of_transit'][far_out_small_planets]),
statistic='median', bins=10)
bincenter = 0.5 * (bs.bin_edges[:-1] + bs.bin_edges[1:])
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(abs_latitude[far_out_small_planets],
np.log(table['ks:in_transit-vs-out_of_transit'][far_out_small_planets]),
'k.', label='far out | small')
ax.plot(bincenter, bs.statistic, 'k', label='median(far out | small)')
ax.set_ylabel('transit depth scatter: log(ks)')
ax.set_xlabel('stellar latitude (assume aligned)')
bs = binned_statistic(abs_latitude[close_in_large_planets],
np.log(table['ks:in_transit-vs-out_of_transit'][close_in_large_planets]),
statistic='median', bins=10)
bincenter = 0.5 * (bs.bin_edges[:-1] + bs.bin_edges[1:])
ax.plot(abs_latitude[close_in_large_planets],
np.log(table['ks:in_transit-vs-out_of_transit'][close_in_large_planets]),
'r.', label='close in & large')
ax.plot(bincenter, bs.statistic, 'r', label='median(close in & large)')
# ax.set_ylabel('transit depth scatter: log(ks)')
# ax.set_xlabel('stellar latitude (assume aligned)')
ax.legend()
ax.invert_yaxis()
ax.set_ylim([0, -150])
plt.show()
#ax.set_title('Small | far out')
#ax.set_title('large & close in')
"""
Explanation: Stars with short period planets have disproportionately larger scatter in transit
End of explanation
"""
|
jstac/quantecon_nyu_2016 | lecture7/Intro_to_pymc.ipynb | bsd-3-clause | %matplotlib inline
import numpy as np
import scipy as sp
import pymc as pm
import seaborn as sb
import matplotlib.pyplot as plt
"""
Explanation: Introduction to PyMC2
Balint Szoke
Installation:
>> conda install pymc
End of explanation
"""
def sample_path(rho, sigma, T, y0=None):
    '''
    Simulate a sample path of the AR(1) process
        y_{t+1} = rho * y_t + sigma * eps_{t+1},  eps ~ N(0, 1)
    of length T+1, starting from a specified initial value OR, if y0 is
    None, initializing the path with a draw from the stationary
    distribution of y (requires |rho| < 1).

    Arguments
    -----------------
    rho   (Float) : AR coefficient
    sigma (Float) : standard deviation of the error
    T     (Int)   : length of the sample path without y0
    y0    (Float) : initial value of y (optional)

    Return:
    -----------------
    y_path (Numpy Array) : simulated path of length T+1
    '''
    # Use `is None` (identity test), not `== None`: equality comparison is
    # un-idiomatic and breaks if y0 is ever a numpy array.
    if y0 is None:
        # Stationary (ergodic) standard deviation of the AR(1).
        stdev_erg = sigma / np.sqrt(1 - rho**2)
        y0 = np.random.normal(0, stdev_erg)
    y_path = np.empty(T + 1)
    y_path[0] = y0
    # Draw all innovations up front, then roll the recursion forward.
    eps_path = np.random.normal(0, 1, T)
    for t in range(T):
        y_path[t + 1] = rho * y_path[t] + sigma * eps_path[t]
    return y_path
#-------------------------------------------------------
# Pick true values:
# Data-generating parameters used throughout the notebook; `sample` is the
# observed dataset (T+1 points) the posterior will be conditioned on.
rho_true, sigma_x_true, T = 0.5, 1.0, 20
#np.random.seed(1453534)
sample = sample_path(rho_true, sigma_x_true, T)
"""
Explanation: Probabilistic model
Suppose you have a sample ${y_t}_{t=0}^{T}$ and want to characterize it by the following probabilistic model; for $t\geq 0$
$$ y_{t+1} = \rho y_t + \sigma_x \varepsilon_{t+1}, \quad \varepsilon_{t+1}\stackrel{iid}{\sim}\cal{N}(0,1) $$
with the initial value $y_0 \sim {\cal N}\left(0, \frac{\sigma_x^2}{1-\rho^2}\right)$ and suppose the following (independent) prior beliefs for the parameters $\theta \equiv (\rho, \sigma_x)$
- $\rho \sim \text{U}(-1, 1)$
- $\sigma_x \sim \text{IG}(a, b)$
Aim: given the statistical model and the prior $\pi(\theta)$ we want to ''compute'' the posterior distribution $p\left( \theta \hspace{1mm} | \hspace{1mm} y^T \right)$ associated with the sample $y^T$.
How: if no conjugate form available, sample from $p\left( \theta \hspace{1mm} | \hspace{1mm} y^T \right)$ and learn about the posterior's properties from that sample
Remark: We go from the prior $\pi$ to the posterior $p$ by using Bayes rule:
\begin{equation}
p\left( \theta \hspace{1mm} | \hspace{1mm} y^T \right) = \frac{f( y^T \hspace{1mm}| \hspace{1mm}\theta) \pi(\theta) }{f( y^T)}
\end{equation}
The first-order autoregression implies that the likelihood function of $y^T$ can be factored as follows:
$$ f(y^T \hspace{1mm}|\hspace{1mm} \theta) = f(y_T| y_{T-1}; \theta)\cdot f(y_{T-1}| y_{T-2}; \theta) \cdots f(y_1 | y_0;\theta )\cdot f(y_0 |\theta) $$
where for all $t\geq 1$
$$ f(y_t | y_{t-1}; \theta) = {\mathcal N}(\rho y_{t-1}, \sigma_x^2) = {\mathcal N}(\mu_t, \sigma_x^2)$$
Generate a sample with $T=100$ for known parameter values:
$$\rho = 0.5\quad \sigma_x = 1.0$$
End of explanation
"""
# Priors:
# Independent priors: rho ~ Uniform(-1, 1), sigma_x ~ InverseGamma(3, 1).
rho = pm.Uniform('rho', lower = -1, upper = 1) # note the capitalized distribution name (rule for pymc distributions)
sigma_x = pm.InverseGamma('sigma_x', alpha = 3, beta = 1)
# random() method
# Stochastic variables act as random number generators: random() redraws the
# internal value, logp evaluates the log-density at the current value.
print('Initialization:')
print("Current value of rho = {: f}".format(rho.value.reshape(1,)[0]))
print("Current logprob of rho = {: f}".format(rho.logp))
rho.random()
print('\nAfter redrawing:')
print("Current value of rho = {: f}".format(rho.value.reshape(1,)[0]))
print("Current logprob of rho = {: f}".format(rho.logp))
"""
Explanation: Probabilistic models in pymc
Model instance $\approx$ collection of random variables linked together according to some rules
Linkages (hierarchical structure):
parent: variables that influence another variable
e.g. $\rho$ and $\sigma_x$ are parents of $y_0$, $a$ and $b$ are parents of $sigma_x$
child: variables that are affected by other variables (subjects of parent variables)
e.g. $y_t$ is a child of $y_{t-1}$, $\rho$ and $\sigma_x$
Why are they useful?
child variable's current value automatically changes whenever its parents' values change
Random variables:
have a value attribute producing the current internal value (given the values of the parents)
computed on-demand and cached for efficiency.
other important attributes: parents (gives dictionary), children (gives a set)
Two main classes of random variables in pymc:
1) Stochastic variable:
variable whose value is not completely determined by its parents
Examples:
parameters with a given distribution
observable variables (data) = particular realizations of a random variable (see below)
treated by the back end as random number generators (see built-in random() method)
logp attribute: evaluate the logprob (mass or density) at the current value; for vector-valued variables it returns the sum of the (joint) logprob
Initialization:
define the distribution (built-in or your own) with name + params of the distribution (can be pymc variable)
optional flags:
value: for a default initial value; if not specified, initialized by a draw from the given distribution
size: for multivariate array of independent stochastic variables. (Alternatively: use array as a distribution parameter)
Initialize stochastic variables
End of explanation
"""
@pm.deterministic(trace = False)
def y0_stdev(rho = rho, sigma = sigma_x):
    # Ergodic standard deviation of the AR(1): sigma / sqrt(1 - rho^2).
    # As a Deterministic node, its value refreshes automatically whenever
    # a parent (rho or sigma_x) changes.
    return sigma / np.sqrt(1 - rho**2)
# Alternatively:
#y0_stdev = pm.Lambda('y0_stdev', lambda r = rho, s = sigma_x: s / np.sqrt(1 - r**2) )
"""
Explanation: 2) Determinsitic variable:
variable that is entirely determined by its parents
''exact functions'' of stochastic variables, however, we can treat them as a variable and not a Python function.
Examples:
model implied restrictions on how the parameters and the observable variables are related
$\text{var}(y_0)$ is a function of $\rho$ and $\sigma_x$
$\mu_{t}$ is an exact function of $\rho$ and $y_{t-1}$
sample statistics, i.e. deterministic functions of the sample
Initialization:
decorator form:
Python function of stochastic variables AND default values + the decorator pm.deterministic
elementary operations (no need to write a function or decorate): $+$, $-$, $*$, $/$
pymc.Lambda
Initialize deterministic variables:
(a) Standard deviation of $y_0$ is a deterministic function of $\rho$ and $\sigma$
End of explanation
"""
# For elementary operators simply write
# Elementary operations (+, -, *, /) on pymc variables create Deterministic
# nodes without the decorator; mu_y is the vector of conditional means
# rho * y_t for t = 0, ..., T-1.
mu_y = rho * sample[:-1]
print(type(mu_y))
# You could also write, to generate a list of Deterministic functions
#MU_y = [rho * sample[j] for j in range(T)]
#print(type(MU_y))
#print(type(MU_y[1]))
#MU_y = pm.Container(MU_y)
#print(type(MU_y))
"""
Explanation: (b) Conditional mean of $y_t$, $\mu_y$, is a deterministic function of $\rho$ and $y_{t-1}$
End of explanation
"""
y0_stdev.parents
"""
Explanation: Let's see the parents of y0_stdev...
End of explanation
"""
# Parent lookups always reflect the parent's *current* value.
y0_stdev.parents['rho'].value
rho.random()
y0_stdev.parents['rho'].value # if the parent is a pymc variable, the current value will be always 'updated'
"""
Explanation: Notice that this is a dictionary, so for example...
End of explanation
"""
# Redrawing the parent (rho) changes the deterministic child's value on demand.
print("Current value of y0_stdev = {: f}".format(y0_stdev.value))
rho.random()
print('\nAfter redrawing rho:')
print("Current value of y0_stdev = {: f}".format(y0_stdev.value))
"""
Explanation: ... and as we alter the parent's value, the child's value changes accordingly
End of explanation
"""
# Same automatic propagation for the vector-valued Deterministic mu_y.
print("Current value of mu_y:")
print(mu_y.value[:4])
rho.random()
print('\nAfter redrawing rho:')
print("Current value of mu_y:")
print(mu_y.value[:4])
"""
Explanation: and similarly for mu_y
End of explanation
"""
# Observed stochastic nodes: fix value to the data with observed=True.
# NOTE: pymc's Normal is parameterized by the precision tau = 1/sigma**2.
# Passing 1/stdev (as the original code did) mis-specifies the likelihood,
# so we square the standard deviations here.
y0 = pm.Normal('y0', mu = 0.0, tau = 1 / y0_stdev**2, observed = True, value = sample[0])
Y = pm.Normal('Y', mu = mu_y, tau = 1 / sigma_x**2, observed=True, value = sample[1:])
Y.value
"""
Explanation: How to tell pymc what you 'know' about the data?
We define the data as a stochastic variable with fixed values and set the observed flag equal to True
For the sample $y^T$, depending on the question at hand, we might want to define
- either $T + 1$ scalar random variables
- or a scalar $y_0$ and a $T$-vector valued $Y$
In the current setup, as we fix the value of $y$ (observed), it doesn't really matter (approach A is easier). However, if we have an array-valued stochastic variable with mutable value, the restriction that we cannot update the values of stochastic variables' in-place becomes onerous in the sampling step (where the step method should propose array-valued variable). Straight from the pymc documentation:
''In this case, it may be preferable to partition the variable into several scalar-valued variables stored in an array or list.''
(A) $y_0$ as a scalar and $Y$ as a vector valued random variable
End of explanation
"""
# The observed node's value stays fixed even when its parents are redrawn;
# only the parent (tau) changes.
Y.parents['tau'].value
sigma_x.random()
print(Y.parents['tau'].value)
Y.value
"""
Explanation: Notice that the value of this variable is fixed (even if the parent's value changes)
End of explanation
"""
# Alternative representation: T+1 scalar stochastic nodes instead of one
# vector-valued node. As above, pymc's Normal takes the precision
# tau = 1/sigma**2, not 1/sigma, so the standard deviation is squared.
Y_alt = np.empty(T + 1, dtype = object)
Y_alt[0] = y0 # definition of y0 is the same as above
for i in range(1, T + 1):
    Y_alt[i] = pm.Normal('y_{:d}'.format(i), mu = mu_y[i-1], tau = 1 / sigma_x**2)
print(type(Y_alt))
Y_alt
"""
Explanation: (B) $T+1$ scalar random variables
Define an array with dtype=object, fill it with scalar variables (use loops) and define it as a pymc.Container (this latter step is not necessary, but based on my experience Container types work much more smoothly in the blocking step when we are sampling).
End of explanation
"""
# Wrap the numpy array of pymc nodes in a Container so pymc methods apply element-wise.
Y_alt = pm.Container(Y_alt)
type(Y_alt)
"""
Explanation: Currently, this is just a numpy array of pymc.Deterministic functions. We can make it a pymc object by using the pymc.Container type.
End of explanation
"""
# A Model is just the collection of stochastic and deterministic nodes.
ar1_model = pm.Model([rho, sigma_x, y0, Y, y0_stdev, mu_y])
ar1_model.stochastics # notice that this is an unordered set (!)
ar1_model.deterministics
"""
Explanation: and the pymc methods are applied element-wise.
Create a pymc.Model instance
Remember that it is just a collection of random variables (Stochastic and Deterministic), hence
End of explanation
"""
M = pm.MCMC(ar1_model)
"""
Explanation: This object has very limited awareness of the structure of the probabilistic model that it describes and does not itself possess methods for updating the values in the sampling step.
Fitting the model to the data (MCMC algorithm)
MCMC algorithms
The joint prior distribution is sitting on an $N$-dimensional space, where $N$ is the number of parameters we are about to make inference on (see the figure below). Looking at the data through the probabilistic model deform the prior surface into the posterior surface, that we need to explore. In principle, we could naively search this space by picking random points in $\mathbb{R}^N$ and calculate the corresponding posterior value (Monte Carlo methods), but a more efficient (especially in higher dimensions) way is to do Markov Chain Monte Carlo (MCMC), which is basically an intelligent way of discovering the posterior surface.
MCMC is an iterative procedure: at every iteration, it proposes a nearby point in the space, then asks 'how likely is it that this point is close to the maximizer of the posterior surface?'; it accepts the proposed point if the likelihood exceeds a particular level and rejects it otherwise (by going back to the old position). The key feature of MCMC is that it produces proposals by simulating a Markov chain for which the posterior is the unique, invariant limiting distribution. In other words, after a possible 'transition period' (i.e. post convergence), it starts producing draws from the posterior.
MCMC algorithm in pymc
By default it uses (in my opinion) the Metropolis-within-Gibbs algorithm, which is based on two simple principles:
1. Blocking and conditioning:
- Divide the $N$ variables of $\theta$ into $K\leq N$ blocks and update every block by sampling from the conditional density, i.e. from the distribuition of the block parameters conditioned on all parameters in the other $K-1$ blocks being at their current values.
* At scan $t$, cycle through the $K$ blocks
$$\theta^{(t)} = [\theta^{(t)}1, \theta^{(t)}_2, \theta^{(t)}_3, \dots, \theta^{(t)}_K] $$
* Sample from the conditionals
\begin{align}
\theta_1^{(t+1)} &\sim f(\theta_1\hspace{1mm} | \hspace{1mm} \theta^{(t)}_2, \theta^{(t)}_3, \dots, \theta^{(t)}_K; \text{data}) \
\theta_2^{(t+1)} &\sim f(\theta_2\hspace{1mm} | \hspace{1mm} \theta^{(t+1)}_1, \theta^{(t)}_3, \dots, \theta^{(t)}_K; \text{data}) \
\theta_3^{(t+1)} &\sim f(\theta_3\hspace{1mm} | \hspace{1mm} \theta^{(t+1)}_1, \theta^{(t+1)}_2, \dots, \theta^{(t)}_K; \text{data}) \
\dots & \
\theta_K^{(t+1)} &\sim f(\theta_3\hspace{1mm} | \hspace{1mm} \theta^{(t+1)}_1, \theta^{(t+1)}_2, \dots, \theta^{(t+1)}{K-1}; \text{data})
\end{align}
Sampling (choose/construct pymc.StepMethod): if for a given block the conditional density $f$ can be expressed in (semi-)analytic form, use it; if not, use Metropolis-Hastings
Semi-closed form example: Forward-backward sampler (Carter and Kohn, 1994):
Metropolis(-Hastings) algorithm:
Start at $\theta$
Propose a new point in the parameterspace according to some proposal density $J(\theta' | \theta)$ (e.g. random walk)
Accept the proposed point with probability
$$\alpha = \min\left( 1, \frac{p(\theta'\hspace{1mm} |\hspace{1mm} \text{data})\hspace{1mm} J(\theta \hspace{1mm}|\hspace{1mm} \theta')}{ p(\theta\hspace{1mm} |\hspace{1mm} \text{data})\hspace{1mm} J(\theta' \hspace{1mm}| \hspace{1mm}\theta)} \right) $$
If accept: Move to the proposed point $\theta'$ and return to Step 1.
If reject: Don't move, keep the point $\theta$ and return to Step 1.
After a large number of iterations (once the Markov Chain convereged), return all accepted $\theta$ as a sample from the posterior
Again, a pymc.Model instance is not much more than a collection, for example, the model variables (blocks) are not matched with step methods determining how to update values in the sampling step. In order to do that, first we need to construct an MCMC instance, which is then ready to be sampled from.
MCMC‘s primary job is to create and coordinate a collection of step methods, each of which is responsible for updating one or more variables (blocks) at each step of the MCMC algorithm. By default, step methods are automatically assigned to variables by PyMC (after we call the sample method).
Main built-in pymc.StepMethods
* Metropolis
* AdaptiveMetropolis
* Slicer
* Gibbs
you can assign step methods manually by calling the method use_step_method(method, *args, **kwargs):
End of explanation
"""
M.step_method_dict
"""
Explanation: Notice that the step_methods are not assigned yet
End of explanation
"""
# draw a sample of size 50,000, drop the first 1,000 and keep only every 5th draw
M.sample(iter = 50000, burn = 1000, thin = 5)
"""
Explanation: You can specify them now, or if you call the sample method, pymc will assign the step_methods automatically according to some rule
End of explanation
"""
M.step_method_dict
"""
Explanation: ... and you can check what kind of step methods have been assigned (the default in most cases is the Metropolis step method for non-observed stochastic variables, while in case of observed stochastics, we simply draw from the prior)
End of explanation
"""
# trace() is indexed by the *pymc* name given at initialization.
M.trace('rho')[:20]
M.trace('sigma_x')[:].shape
"""
Explanation: The sample can be reached by the trace method (use the name you gave at initialization, not the Python variable name -- this matters when the two differ)
End of explanation
"""
# Traces are plain numpy arrays: plot the chain and its histogram side by side.
sigma_sample = M.trace('sigma_x')[:]
rho_sample = M.trace('rho')[:]
fig, ax = plt. subplots(1, 2, figsize = (15, 5))
ax[0].plot(sigma_sample)
ax[1].hist(sigma_sample)
"""
Explanation: Then this is just a numpy array, so you can do different sort of things with it. For example plot
End of explanation
"""
# pymc's built-in diagnostic plot: trace, autocorrelation and histogram.
from pymc.Matplot import plot as fancy_plot
fancy_plot(M.trace('rho'))
"""
Explanation: Actually, you don't have to waste your time on constructing different subplots. pymc's built-in plotting functionality creates pretty informative plots for you (based on matplotlib). On the figure below
- Upper left subplot: trace,
- Lower left subplot: autocorrelation (try to resample the model with thin=1),
- Right subplot: histogram with the mean
End of explanation
"""
# Numerical posterior summary for a single parameter.
M.stats('rho')
# Try also:
#M.summary()
# Draw an equally sized sample from the priors to compare prior vs posterior.
N = len(rho_sample)
rho_pr = [rho.random() for i in range(N)]
sigma_pr = [sigma_x.random() for i in range(N)]
Prior = np.vstack([rho_pr, sigma_pr]).T
Posterior = np.vstack([rho_sample, sigma_sample]).T
# Joint (2D) kernel density of prior and posterior; true values marked in red.
fig, bx = plt.subplots(1, 2, figsize = (17, 10), sharey = True)
sb.kdeplot(Prior, shade = True, cmap = 'PuBu', ax = bx[0])
bx[0].patch.set_facecolor('white')
bx[0].collections[0].set_alpha(0)
bx[0].axhline(y = sigma_x_true, color = 'DarkRed', lw =2)
bx[0].axvline(x = rho_true, color = 'DarkRed', lw =2)
bx[0].set_xlabel(r'$\rho$', fontsize = 18)
bx[0].set_ylabel(r'$\sigma_x$', fontsize = 18)
bx[0].set_title('Prior', fontsize = 20)
sb.kdeplot(Posterior, shade = True, cmap = 'PuBu', ax = bx[1])
bx[1].patch.set_facecolor('white')
bx[1].collections[0].set_alpha(0)
bx[1].axhline(y = sigma_x_true, color = 'DarkRed', lw =2)
bx[1].axvline(x = rho_true, color = 'DarkRed', lw =2)
bx[1].set_xlabel(r'$\rho$', fontsize = 18)
bx[1].set_ylabel(r'$\sigma_x$', fontsize = 18)
bx[1].set_title('Posterior', fontsize = 20)
plt.xlim(-1, 1)
plt.ylim(0, 1.5)
plt.tight_layout()
plt.savefig('beamer/prior_post.pdf')
# Marginal prior densities evaluated on a grid (scipy frozen distributions).
rho_grid = np.linspace(-1, 1, 100)
sigmay_grid = np.linspace(0, 1.5, 100)
U = sp.stats.uniform(-1, 2)
IG = sp.stats.invgamma(3)
# 2x2 panel: marginal prior (left) vs marginal posterior (right) per parameter.
fig2, cx = plt.subplots(2, 2, figsize = (17, 12), sharey = True)
cx[0, 0].plot(rho_grid, U.pdf(rho_grid), 'r-', lw = 3, alpha = 0.6, label = r'$\rho$ prior')
cx[0, 0].set_title(r"Marginal prior for $\rho$", fontsize = 18)
cx[0, 0].axvline(x = rho_true, color = 'DarkRed', lw = 2, linestyle = '--', label = r'True $\rho$')
cx[0, 0].legend(loc='best', fontsize = 16)
cx[0, 0].set_xlim(-1, 1)
sb.distplot(rho_sample, ax = cx[0,1], kde_kws={"color": "r", "lw": 3, "label": r"$\rho$ posterior"})
cx[0, 1].set_title(r"Marginal posterior for $\rho$", fontsize = 18)
cx[0, 1].axvline(x = rho_true, color = 'DarkRed', lw = 2, linestyle = '--', label = r'True $\rho$')
cx[0, 1].legend(loc='best', fontsize = 16)
cx[0, 1].set_xlim(-1, 1)
cx[1, 0].plot(sigmay_grid, IG.pdf(sigmay_grid), 'r-', lw=3, alpha=0.6, label=r'$\sigma_y$ prior')
cx[1, 0].set_title(r"Marginal prior for $\sigma_y$", fontsize = 18)
cx[1, 0].axvline(x = sigma_x_true, color = 'DarkRed', lw = 2, linestyle = '--', label = r'True $\sigma_y$')
cx[1, 0].legend(loc = 'best', fontsize = 16)
cx[1, 0].set_xlim(0, 3)
sb.distplot(sigma_sample, ax = cx[1,1], kde_kws={"color": "r", "lw": 3, "label": r"$\sigma_y$ posterior"})
cx[1, 1].set_title(r"Marginal posterior for $\sigma_y$", fontsize = 18)
cx[1, 1].axvline(x = sigma_x_true, color = 'DarkRed', lw = 2, linestyle = '--', label = r'True $\sigma_y$')
cx[1, 1].legend(loc = 'best', fontsize = 16)
cx[1, 1].set_xlim(0, 3)
plt.tight_layout()
plt.savefig('beamer/marginal_prior_post.pdf')
"""
Explanation: For a non-graphical summary of the posterior use the stats() method
End of explanation
"""
|
tensorflow/docs-l10n | site/zh-cn/quantum/tutorials/qcnn.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
"""
!pip install tensorflow==2.4.1
"""
Explanation: 量子卷积神经网络
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://tensorflow.google.cn/quantum/tutorials/qcnn"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 TensorFlow.org 上查看</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/quantum/tutorials/qcnn.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/quantum/tutorials/qcnn.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 GitHub 上查看源代码</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/quantum/tutorials/qcnn.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td>
</table>
本教程介绍如何实现一个简化的<a href="https://www.nature.com/articles/s41567-019-0648-8" class="external">量子卷积神经网络</a> (QCNN),即对同样具有平移不变性的经典卷积神经网络的提议量子模拟。
本示例演示如何检测量子数据源的某些属性,例如设备的量子传感器或复杂模拟。量子数据源是可能有或可能没有激发的<a href="https://arxiv.org/pdf/quant-ph/0504097.pdf" class="external">簇态</a>,后者是 QCNN 将学习检测的对象(论文中使用的数据集是 SPT 阶段分类)。
设置
End of explanation
"""
!pip install tensorflow-quantum
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
"""
Explanation: 安装 TensorFlow Quantum:
End of explanation
"""
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
"""
Explanation: 现在,导入 TensorFlow 和模块依赖项:
End of explanation
"""
# Demo of tfq.layers.AddCircuit: append a fixed circuit to a batch of circuits
# represented as a string tensor.
qubit = cirq.GridQubit(0, 0)
# Define some circuits.
circuit1 = cirq.Circuit(cirq.X(qubit))
circuit2 = cirq.Circuit(cirq.H(qubit))
# Convert to a tensor.
input_circuit_tensor = tfq.convert_to_tensor([circuit1, circuit2])
# Define a circuit that we want to append
y_circuit = cirq.Circuit(cirq.Y(qubit))
# Instantiate our layer
y_appender = tfq.layers.AddCircuit()
# Run our circuit tensor through the layer and save the output.
output_circuit_tensor = y_appender(input_circuit_tensor, append=y_circuit)
"""
Explanation: 1. 构建 QCNN
1.1 在 TensorFlow 计算图中组装电路
TensorFlow Quantum (TFQ) 提供了专为计算图中的电路构造而设计的层类。一个示例是从 tf.keras.Layer 继承的 tfq.layers.AddCircuit 层。此层可以追加或附加到电路的输入批次,如下图所示。
<img src="./images/qcnn_1.png" width="700">
下面的代码段使用了此层:
End of explanation
"""
print(tfq.from_tensor(input_circuit_tensor))
"""
Explanation: 检查输入张量:
End of explanation
"""
print(tfq.from_tensor(output_circuit_tensor))
"""
Explanation: 检查输出张量:
End of explanation
"""
def generate_data(qubits):
    """Generate training and testing data."""
    n_rounds = 20  # Produces n_rounds * n_qubits datapoints.
    excitations = []
    labels = []
    for _ in range(n_rounds):
        for bit in qubits:
            # Random rotation angle; label +1 ("excited") iff |angle| <= pi/2.
            angle = np.random.uniform(-np.pi, np.pi)
            excitations.append(cirq.Circuit(cirq.rx(angle)(bit)))
            labels.append(1 if (-np.pi / 2) <= angle <= (np.pi / 2) else -1)
    # 70/30 train/test split, preserving generation order.
    n_train = int(len(excitations) * 0.7)
    train_x, test_x = excitations[:n_train], excitations[n_train:]
    train_y, test_y = labels[:n_train], labels[n_train:]
    return (tfq.convert_to_tensor(train_x), np.array(train_y),
            tfq.convert_to_tensor(test_x), np.array(test_y))
"""
Explanation: 虽然不使用 tfq.layers.AddCircuit 也可以运行下面的示例,但这是一个理解如何将复杂的功能嵌入 TensorFlow 计算图的好机会。
1.2 问题概述
您将准备簇态,并训练一个量子分类器来检测它是否处于“激发”状态。簇态是高度纠缠的,不过,这对经典计算机而言并非难以解决的问题。为了让您更清楚地理解,我们使用的这一数据集比论文中使用的更简单。
对于此分类任务,您将实现一个类似深度 <a href="https://arxiv.org/pdf/quant-ph/0610099.pdf" class="external">MERA</a> 的 QCNN 架构,因为:
就像 QCNN 一样,环上的簇态具有平移不变性。
簇态是高度纠缠的。
这种架构在减少纠缠方面应该会很有效,通过读出一个量子位来获得分类。
<img src="./images/qcnn_2.png" width="1000">
根据定义,“激发”簇态是指将 cirq.rx 门应用到其任何量子位的簇态。Qconv 和 QPool 在本教程的后续部分讨论。
1.3 为 TensorFlow 构建块
<img src="./images/qcnn_3.png" width="1000">
使用 TensorFlow Quantum 解决此问题的一种方式是实现以下几点:
模型的输入是一个电路张量,空电路或表明激发的特定量子位上的 X 门。
使用 tfq.layers.AddCircuit 层构造模型的其他量子组件。
使用 tfq.layers.PQC 层进行推断。它会读取 $\langle \hat{Z} \rangle$,并将其与激发态的标签 1 或非激发态的标签 -1 进行比较。
1.4 数据
在构建模型之前,您可以生成数据。在本例中,它将是簇态的激发(原论文使用的是一个更复杂的数据集)。激发通过 cirq.rx 门表示。我们将足够大的旋转视为激发,并使用 1 进行标记,不够大的旋转则使用 -1 标记,我们将其视为未激发。
End of explanation
"""
# Peek at two datapoints: the excitation circuit and its +/-1 label.
sample_points, sample_labels, _, __ = generate_data(cirq.GridQubit.rect(1, 4))
print('Input:', tfq.from_tensor(sample_points)[0], 'Output:', sample_labels[0])
print('Input:', tfq.from_tensor(sample_points)[1], 'Output:', sample_labels[1])
"""
Explanation: 您可以看到,就像使用常规的机器学习一样,您创建了一个用于对模型进行基准测试的训练和测试集。利用以下代码段,您可以快速查看某些数据点:
End of explanation
"""
def cluster_state_circuit(bits):
    """Return a cluster state on the qubits in `bits`."""
    # Hadamard every qubit, then entangle neighbours on a ring with CZ,
    # including the wrap-around pair (last, first).
    circuit = cirq.Circuit(cirq.H.on_each(bits))
    n_bits = len(bits)
    for idx in range(n_bits):
        circuit.append(cirq.CZ(bits[idx], bits[(idx + 1) % n_bits]))
    return circuit
"""
Explanation: 1.5 定义层
现在,我们在 TensorFlow 中定义上图中显示的层。
1.5.1 簇态
第一步是使用 <a href="https://github.com/quantumlib/Cirq" class="external">Cirq</a>(Google 为量子电路编程提供的框架)定义<a href="https://arxiv.org/pdf/quant-ph/0504097.pdf" class="external">簇态</a>。由于这是模型的一个静态部分,因此使用 tfq.layers.AddCircuit 功能将其嵌入。
End of explanation
"""
SVGCircuit(cluster_state_circuit(cirq.GridQubit.rect(1, 4)))
"""
Explanation: 显示 <a href="https://cirq.readthedocs.io/en/stable/generated/cirq.GridQubit.html" class="external"><code>cirq.GridQubit</code></a> 的矩形的一个簇态电路:
End of explanation
"""
def one_qubit_unitary(bit, symbols):
    """Make a Cirq circuit enacting a rotation of the bloch sphere about the X,
    Y and Z axis, that depends on the values in `symbols`.
    """
    # One parameterized power of each Pauli, in X, Y, Z order.
    rotations = [axis(bit)**power
                 for axis, power in zip((cirq.X, cirq.Y, cirq.Z), symbols)]
    return cirq.Circuit(rotations)
def two_qubit_unitary(bits, symbols):
    """Make a Cirq circuit that creates an arbitrary two qubit unitary."""
    # 15-parameter decomposition: local rotations on each qubit (symbols 0-5),
    # a general two-qubit interaction via ZZ/YY/XX powers (symbols 6-8),
    # then local rotations again (symbols 9-14).
    circuit = cirq.Circuit()
    circuit += one_qubit_unitary(bits[0], symbols[0:3])
    circuit += one_qubit_unitary(bits[1], symbols[3:6])
    circuit += [cirq.ZZ(*bits)**symbols[6]]
    circuit += [cirq.YY(*bits)**symbols[7]]
    circuit += [cirq.XX(*bits)**symbols[8]]
    circuit += one_qubit_unitary(bits[0], symbols[9:12])
    circuit += one_qubit_unitary(bits[1], symbols[12:])
    return circuit
def two_qubit_pool(source_qubit, sink_qubit, symbols):
    """Make a Cirq circuit to do a parameterized 'pooling' operation, which
    attempts to reduce entanglement down from two qubits to just one."""
    # Rotate both qubits into learned bases (symbols 0-2 for sink, 3-5 for
    # source), entangle with a CNOT, then undo the sink rotation so the
    # source's information is folded into the sink qubit.
    pool_circuit = cirq.Circuit()
    sink_basis_selector = one_qubit_unitary(sink_qubit, symbols[0:3])
    source_basis_selector = one_qubit_unitary(source_qubit, symbols[3:6])
    pool_circuit.append(sink_basis_selector)
    pool_circuit.append(source_basis_selector)
    pool_circuit.append(cirq.CNOT(control=source_qubit, target=sink_qubit))
    pool_circuit.append(sink_basis_selector**-1)
    return pool_circuit
"""
Explanation: 1.5.2 QCNN 层
按照 <a href="https://arxiv.org/abs/1810.03787" class="external">Cong 和 Lukin QCNN 论文</a>定义组成模型的层。这需要具备几个前提条件:
<a href="https://arxiv.org/abs/quant-ph/0507171" class="external">Tucci 论文</a>中提出的单或双量子位参数化酉矩阵。
一个通用的参数化双量子位池化运算。
End of explanation
"""
SVGCircuit(one_qubit_unitary(cirq.GridQubit(0, 0), sympy.symbols('x0:3')))
"""
Explanation: 要查看您创建的对象,请打印出单量子位酉电路:
End of explanation
"""
SVGCircuit(two_qubit_unitary(cirq.GridQubit.rect(1, 2), sympy.symbols('x0:15')))
"""
Explanation: 以及双量子位酉电路:
End of explanation
"""
SVGCircuit(two_qubit_pool(*cirq.GridQubit.rect(1, 2), sympy.symbols('x0:6')))
"""
Explanation: 以及双量子位池化电路:
End of explanation
"""
def quantum_conv_circuit(bits, symbols):
    """Quantum Convolution Layer following the above diagram.
    Return a Cirq circuit with the cascade of `two_qubit_unitary` applied
    to all pairs of qubits in `bits` as in the diagram above.
    """
    circuit = cirq.Circuit()
    n_bits = len(bits)
    # Even-offset pairs (0,1), (2,3), ... followed by odd-offset pairs
    # (1,2), (3,4), ... with a wrap-around pair back to bits[0]; every pair
    # shares the same symbols (translation invariance).
    for i in range(0, n_bits - 1, 2):
        circuit += two_qubit_unitary([bits[i], bits[i + 1]], symbols)
    for i in range(1, n_bits, 2):
        circuit += two_qubit_unitary([bits[i], bits[(i + 1) % n_bits]], symbols)
    return circuit
"""
Explanation: 1.5.2.1 量子卷积
按照 <a href="https://arxiv.org/abs/1810.03787" class="external">Cong 和 Lukin</a> 的论文所述,将一维量子卷积定义为对每对步长为 1 的相邻量子位的双量子位参数化酉的应用。
End of explanation
"""
SVGCircuit(
quantum_conv_circuit(cirq.GridQubit.rect(1, 8), sympy.symbols('x0:15')))
"""
Explanation: 显示(高度水平的)电路:
End of explanation
"""
def quantum_pool_circuit(source_bits, sink_bits, symbols):
    """A layer that specifies a quantum pooling operation.
    A Quantum pool tries to learn to pool the relevant information from two
    qubits onto 1.
    """
    # One shared-parameter pool operation per (source, sink) pair,
    # concatenated into a single circuit.
    pair_ops = [two_qubit_pool(src, snk, symbols)
                for src, snk in zip(source_bits, sink_bits)]
    return sum(pair_ops, cirq.Circuit())
"""
Explanation: 1.5.2.2 量子池化
量子池化层使用上面定义的双量子位池从 $N$ 个量子位池化为 $\frac{N}{2}$ 个量子位。
End of explanation
"""
test_bits = cirq.GridQubit.rect(1, 8)
SVGCircuit(
quantum_pool_circuit(test_bits[:4], test_bits[4:], sympy.symbols('x0:6')))
"""
Explanation: 检查池化组件电路:
End of explanation
"""
def create_model_circuit(qubits):
    """Create sequence of alternating convolution and pooling operators
    which gradually shrink over time."""
    model_circuit = cirq.Circuit()
    symbols = sympy.symbols('qconv0:63')
    # Cirq uses sympy.Symbols to map learnable variables. TensorFlow Quantum
    # scans incoming circuits and replaces these with TensorFlow variables.
    # Three conv+pool stages shrink 8 -> 4 -> 2 -> 1 qubits; each conv uses
    # 15 symbols and each pool 6, for 3 * (15 + 6) = 63 parameters total.
    model_circuit += quantum_conv_circuit(qubits, symbols[0:15])
    model_circuit += quantum_pool_circuit(qubits[:4], qubits[4:],
                                          symbols[15:21])
    model_circuit += quantum_conv_circuit(qubits[4:], symbols[21:36])
    model_circuit += quantum_pool_circuit(qubits[4:6], qubits[6:],
                                          symbols[36:42])
    model_circuit += quantum_conv_circuit(qubits[6:], symbols[42:57])
    model_circuit += quantum_pool_circuit([qubits[6]], [qubits[7]],
                                          symbols[57:63])
    return model_circuit
# Create our qubits and readout operators in Cirq.
# Readout is the Pauli-Z expectation on the single surviving qubit.
cluster_state_bits = cirq.GridQubit.rect(1, 8)
readout_operators = cirq.Z(cluster_state_bits[-1])
# Build a sequential model enacting the logic in 1.3 of this notebook.
# Here you are making the static cluster state prep as a part of the AddCircuit and the
# "quantum datapoints" are coming in the form of excitation
excitation_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state = tfq.layers.AddCircuit()(
    excitation_input, prepend=cluster_state_circuit(cluster_state_bits))
# PQC trains the 63 circuit symbols and returns <Z> on the readout qubit.
quantum_model = tfq.layers.PQC(create_model_circuit(cluster_state_bits),
                               readout_operators)(cluster_state)
qcnn_model = tf.keras.Model(inputs=[excitation_input], outputs=[quantum_model])
# Show the keras plot of the model
tf.keras.utils.plot_model(qcnn_model,
                          show_shapes=True,
                          show_layer_names=False,
                          dpi=70)
"""
Explanation: 1.6 模型定义
现在,使用定义的层构造纯量子 CNN。首先创建八个量子位,再将其池化为一个量子位,然后测量 $\langle \hat{Z} \rangle$。
End of explanation
"""
# Generate some training data.
train_excitations, train_labels, test_excitations, test_labels = generate_data(
    cluster_state_bits)
# Custom accuracy metric.
# Fraction of predictions whose sign matches the +/-1 labels (the model's
# raw output is a real number in [-1, 1]).
@tf.function
def custom_accuracy(y_true, y_pred):
    y_true = tf.squeeze(y_true)
    y_pred = tf.map_fn(lambda x: 1.0 if x >= 0 else -1.0, y_pred)
    return tf.keras.backend.mean(tf.keras.backend.equal(y_true, y_pred))
# MSE loss against +/-1 labels; Adam optimizer.
qcnn_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
                   loss=tf.losses.mse,
                   metrics=[custom_accuracy])
history = qcnn_model.fit(x=train_excitations,
                         y=train_labels,
                         batch_size=16,
                         epochs=25,
                         verbose=1,
                         validation_data=(test_excitations, test_labels))
# Loss curves, skipping the noisy first epoch.
plt.plot(history.history['loss'][1:], label='Training')
plt.plot(history.history['val_loss'][1:], label='Validation')
plt.title('Training a Quantum CNN to Detect Excited Cluster States')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
"""
Explanation: 1.7 训练模型
在整个批次上训练模型以简化此示例。
End of explanation
"""
# 1-local operators to read out
# Pauli-Z readout on each of the four qubits that survive one pooling stage.
readouts = [cirq.Z(bit) for bit in cluster_state_bits[4:]]
def multi_readout_model_circuit(qubits):
    """Make a model circuit with less quantum pool and conv operations."""
    # Single conv (15 symbols) + single pool (6 symbols): 8 -> 4 qubits.
    model_circuit = cirq.Circuit()
    symbols = sympy.symbols('qconv0:21')
    model_circuit += quantum_conv_circuit(qubits, symbols[0:15])
    model_circuit += quantum_pool_circuit(qubits[:4], qubits[4:],
                                          symbols[15:21])
    return model_circuit
# Build a model enacting the logic in 2.1 of this notebook.
excitation_input_dual = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state_dual = tfq.layers.AddCircuit()(
    excitation_input_dual, prepend=cluster_state_circuit(cluster_state_bits))
# One quantum conv+pool "filter", read out on the last four qubits, feeding
# a small classical dense head that produces the scalar prediction.
quantum_model_dual = tfq.layers.PQC(
    multi_readout_model_circuit(cluster_state_bits),
    readouts)(cluster_state_dual)
d1_dual = tf.keras.layers.Dense(8)(quantum_model_dual)
d2_dual = tf.keras.layers.Dense(1)(d1_dual)
hybrid_model = tf.keras.Model(inputs=[excitation_input_dual], outputs=[d2_dual])
# Display the model architecture
tf.keras.utils.plot_model(hybrid_model,
                          show_shapes=True,
                          show_layer_names=False,
                          dpi=70)
"""
Explanation: 2. 混合模型
您不必使用量子卷积将八个量子位池化为一个量子位,您可以执行一到两轮的量子卷积,然后将结果馈送到经典神经网络中。本部分探讨量子-经典混合模型。
2.1 使用单个量子滤波器的混合模型
应用一层量子卷积,在后跟密集连接的神经网络的所有位上读出 $\langle \hat{Z}_n \rangle$。
<img src="./images/qcnn_5.png" width="1000">
2.1.1 模型定义
End of explanation
"""
# Same optimizer/loss/metric setup as the pure-quantum model for a fair comparison.
hybrid_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
                     loss=tf.losses.mse,
                     metrics=[custom_accuracy])
hybrid_history = hybrid_model.fit(x=train_excitations,
                                  y=train_labels,
                                  batch_size=16,
                                  epochs=25,
                                  verbose=1,
                                  validation_data=(test_excitations,
                                                   test_labels))
# Compare validation accuracy against the earlier pure-QCNN run.
plt.plot(history.history['val_custom_accuracy'], label='QCNN')
plt.plot(hybrid_history.history['val_custom_accuracy'], label='Hybrid CNN')
plt.title('Quantum vs Hybrid CNN performance')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Validation Accuracy')
plt.show()
"""
Explanation: 2.1.2 训练模型
End of explanation
"""
excitation_input_multi = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state_multi = tfq.layers.AddCircuit()(
    excitation_input_multi, prepend=cluster_state_circuit(cluster_state_bits))
# apply 3 different filters and measure expectation values
# Each PQC layer carries its own independent trainable parameters.
quantum_model_multi1 = tfq.layers.PQC(
    multi_readout_model_circuit(cluster_state_bits),
    readouts)(cluster_state_multi)
quantum_model_multi2 = tfq.layers.PQC(
    multi_readout_model_circuit(cluster_state_bits),
    readouts)(cluster_state_multi)
quantum_model_multi3 = tfq.layers.PQC(
    multi_readout_model_circuit(cluster_state_bits),
    readouts)(cluster_state_multi)
# concatenate outputs and feed into a small classical NN
concat_out = tf.keras.layers.concatenate(
    [quantum_model_multi1, quantum_model_multi2, quantum_model_multi3])
dense_1 = tf.keras.layers.Dense(8)(concat_out)
dense_2 = tf.keras.layers.Dense(1)(dense_1)
multi_qconv_model = tf.keras.Model(inputs=[excitation_input_multi],
                                   outputs=[dense_2])
# Display the model architecture
tf.keras.utils.plot_model(multi_qconv_model,
                          show_shapes=True,
                          show_layer_names=True,
                          dpi=70)
"""
Explanation: 如您所见,在适当的经典模型的帮助下,混合模型通常比纯量子版本收敛得更快。
2.2 使用多个量子滤波器的混合卷积
现在,我们来试试使用多个量子卷积和一个经典神经网络的架构,将其组合在一起。
<img src="./images/qcnn_6.png" width="1000">
2.2.1 模型定义
End of explanation
"""
multi_qconv_model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
    loss=tf.losses.mse,
    metrics=[custom_accuracy])
multi_qconv_history = multi_qconv_model.fit(x=train_excitations,
                                            y=train_labels,
                                            batch_size=16,
                                            epochs=25,
                                            verbose=1,
                                            validation_data=(test_excitations,
                                                             test_labels))
# Compare validation accuracy of all three architectures over 25 epochs.
plt.plot(history.history['val_custom_accuracy'][:25], label='QCNN')
plt.plot(hybrid_history.history['val_custom_accuracy'][:25], label='Hybrid CNN')
plt.plot(multi_qconv_history.history['val_custom_accuracy'][:25],
         label='Hybrid CNN \n Multiple Quantum Filters')
plt.title('Quantum vs Hybrid CNN performance')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Validation Accuracy')
plt.show()
"""
Explanation: 2.2.2 训练模型
End of explanation
"""
|
dewitt-li/deep-learning | first-neural-network/Your_first_neural_network.ipynb | mit | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
End of explanation
"""
# Hourly bike-sharing records; the `cnt` column is the prediction target.
data_path = 'Bike-Sharing-Dataset/hour.csv'

rides = pd.read_csv(data_path)

rides.head(50)
"""
Explanation: Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
End of explanation
"""
# Plot the first 24*100 hourly records of ridership against the date.
rides[:24*100].plot(x='dteday', y='cnt')
"""
Explanation: Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
End of explanation
"""
# One-hot encode the categorical columns, then drop the originals plus
# fields that are unused or redundant (e.g. atemp duplicates temp).
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for field in dummy_fields:
    one_hot = pd.get_dummies(rides[field], prefix=field, drop_first=False)
    rides = pd.concat([rides, one_hot], axis=1)

fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
"""
Explanation: Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().
End of explanation
"""
# Standardize each continuous column to zero mean / unit variance.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for column in quant_features:
    mean = data[column].mean()
    std = data[column].std()
    scaled_features[column] = [mean, std]
    data.loc[:, column] = (data[column] - mean) / std
data.head()
"""
Explanation: Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
End of explanation
"""
# Save data for approximately the last 21 days
# (21 days * 24 hourly rows per day)
test_data = data[-21*24:]

# Now remove the test data from the data set
data = data[:-21*24]

# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
"""
Explanation: Splitting the data into training, testing, and validation sets
We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
End of explanation
"""
# Hold out the last 60 days or so of the remaining data as a validation set
# (time-ordered split: train on the past, validate on the most recent data).
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
"""
Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
End of explanation
"""
class NeuralNetwork(object):
    """Two-layer (hidden + output) network trained with plain gradient descent.

    The hidden layer uses a sigmoid activation; the output layer is linear
    (identity activation), which suits the regression target.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Layer sizes.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Weights drawn from N(0, 1/sqrt(fan_in)) so early activations stay
        # in the sigmoid's sensitive range.
        self.weights_input_to_hidden = np.random.normal(
            0.0, self.input_nodes ** -0.5,
            (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.hidden_nodes ** -0.5,
            (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Sigmoid activation for the hidden layer.
        self.activation_function = lambda x: 1 / (1 + np.exp(-x))

    def train(self, features, targets):
        ''' Train the network on batch of features and targets.

        Arguments
        ---------
        features: 2D array, each row is one data record, each column is a feature
        targets: 1D array of target values
        '''
        n_records = features.shape[0]
        step_input_hidden = np.zeros(self.weights_input_to_hidden.shape)
        step_hidden_output = np.zeros(self.weights_hidden_to_output.shape)

        for X, y in zip(features, targets):
            ### Forward pass ###
            hidden_outputs = self.activation_function(
                np.dot(X, self.weights_input_to_hidden))
            # Output activation is the identity, so the prediction is the
            # raw weighted sum of the hidden activations.
            prediction = np.dot(hidden_outputs, self.weights_hidden_to_output)

            ### Backward pass ###
            # Identity output -> derivative 1, so the error term is the error.
            output_error_term = y - prediction
            hidden_error = np.dot(output_error_term,
                                  self.weights_hidden_to_output.T)
            # Sigmoid derivative: h * (1 - h).
            hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)

            # Accumulate weight steps over the batch.
            step_input_hidden += hidden_error_term * X[:, None]
            step_hidden_output += output_error_term * hidden_outputs[:, None]

        # Average the accumulated steps and apply one gradient-descent update.
        self.weights_hidden_to_output += self.lr * step_hidden_output / n_records
        self.weights_input_to_hidden += self.lr * step_input_hidden / n_records

    def run(self, features):
        ''' Run a forward pass through the network with input features

        Arguments
        ---------
        features: 1D array of feature values
        '''
        hidden_outputs = self.activation_function(
            np.dot(features, self.weights_input_to_hidden))
        # Linear output layer.
        return np.dot(hidden_outputs, self.weights_hidden_to_output)
def MSE(y, Y):
    """Mean squared error between predictions y and targets Y."""
    return np.mean(np.square(y - Y))
"""
Explanation: Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
<img src="assets/neural_network.png" width=300px>
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
End of explanation
"""
import unittest

# Fixture: a single data point and fixed small weight matrices so one
# training step has known-good results.
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
                       [0.4, 0.5],
                       [-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
                       [-0.1]])

class TestMethods(unittest.TestCase):

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        # Known-good weights after one training step on the fixture above.
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))

# Run the suite inline (notebook style) rather than via unittest.main().
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
"""
Explanation: Unit tests
Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project.
End of explanation
"""
import sys

### Set the hyperparameters here ###
iterations = 2000
learning_rate = 0.08
hidden_nodes = 60
output_nodes = 1

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train':[], 'validation':[]}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    # .loc replaces the deprecated (and removed in pandas 1.0) DataFrame.ix
    # indexer; `batch` holds index labels, so label-based .loc is correct.
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']

    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)

plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
"""
Explanation: Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of iterations
This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model with not generalize well to other data, this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
End of explanation
"""
fig, ax = plt.subplots(figsize=(8,4))

mean, std = scaled_features['cnt']
# Undo the standardization so predictions are in units of ride counts.
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
# predictions is shaped (1, n_points): limit the x-axis by the number of
# points, not len(predictions) (which would always be 1).
ax.set_xlim(right=len(predictions[0]))
ax.legend()

# .loc replaces the removed DataFrame.ix indexer (label-based lookup).
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
"""
Explanation: Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
End of explanation
"""
|
diegocavalca/Studies | deep-learnining-specialization/2. improving deep neural networks/resources/Regularization.ipynb | cc0-1.0 | # import packages
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *
%matplotlib inline
# Matplotlib defaults used throughout this notebook.
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
"""
Explanation: Regularization
Welcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that overfitting can be a serious problem, if the training dataset is not big enough. Sure it does well on the training set, but the learned network doesn't generalize to new examples that it has never seen!
You will learn to: Use regularization in your deep learning models.
Let's first import the packages you are going to use.
End of explanation
"""
# Load the 2D football-field dataset: point coordinates with binary labels
# (1 = blue dot, 0 = red dot), pre-split into train and test sets.
train_X, train_Y, test_X, test_Y = load_2D_dataset()
"""
Explanation: Problem Statement: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head.
<img src="images/field_kiank.png" style="width:600px;height:350px;">
<caption><center> <u> Figure 1 </u>: Football field<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption>
They give you the following 2D dataset from France's past 10 games.
End of explanation
"""
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
    learning_rate -- learning rate of the optimization
    num_iterations -- number of iterations of the optimization loop
    print_cost -- If True, print the cost every 10000 iterations
    lambd -- regularization hyperparameter, scalar (0 disables L2)
    keep_prob - probability of keeping a neuron active during drop-out, scalar (1 disables dropout).

    Returns:
    parameters -- parameters learned by the model. They can then be used to predict.
    """

    grads = {}
    costs = []                            # to keep track of the cost
    m = X.shape[1]                        # number of examples
    layers_dims = [X.shape[0], 20, 3, 1]  # fixed input->20->3->1 architecture

    # Initialize parameters dictionary.
    parameters = initialize_parameters(layers_dims)

    # Loop (gradient descent)

    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        # keep_prob == 1 means dropout is disabled.
        if keep_prob == 1:
            a3, cache = forward_propagation(X, parameters)
        elif keep_prob < 1:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)

        # Cost function: add the L2 penalty term only when lambd is non-zero.
        if lambd == 0:
            cost = compute_cost(a3, Y)
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)

        # Backward propagation.
        assert(lambd==0 or keep_prob==1)    # it is possible to use both L2 regularization and dropout,
                                            # but this assignment will only explore one at a time
        if lambd == 0 and keep_prob == 1:
            grads = backward_propagation(X, Y, cache)
        elif lambd != 0:
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print the loss every 10000 iterations
        if print_cost and i % 10000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
        if print_cost and i % 1000 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (x1,000)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
"""
Explanation: Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.
- If the dot is blue, it means the French player managed to hit the ball with his/her head
- If the dot is red, it means the other team's player hit the ball with their head
Your goal: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball.
Analysis of the dataset: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well.
You will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem.
1 - Non-regularized model
You will use the following neural network (already implemented for you below). This model can be used:
- in regularization mode -- by setting the lambd input to a non-zero value. We use "lambd" instead of "lambda" because "lambda" is a reserved keyword in Python.
- in dropout mode -- by setting the keep_prob to a value less than one
You will first try the model without any regularization. Then, you will implement:
- L2 regularization -- functions: "compute_cost_with_regularization()" and "backward_propagation_with_regularization()"
- Dropout -- functions: "forward_propagation_with_dropout()" and "backward_propagation_with_dropout()"
In each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.
End of explanation
"""
# Baseline: no regularization (lambd=0, keep_prob=1 defaults).
parameters = model(train_X, train_Y)
print ("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
"""
Explanation: Let's train the model without any regularization, and observe the accuracy on the train/test sets.
End of explanation
"""
plt.title("Model without regularization")
axes = plt.gca()
# Clip the plot window to the region containing the data points.
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: The train accuracy is 94.8% while the test accuracy is 91.5%. This is the baseline model (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.
End of explanation
"""
# GRADED FUNCTION: compute_cost_with_regularization

def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Implement the cost function with L2 regularization. See formula (2) above.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model

    Returns:
    cost - value of the regularized loss function (formula (2))
    """
    m = Y.shape[1]

    # Cross-entropy part of the cost from the unregularized helper.
    cross_entropy_cost = compute_cost(A3, Y)

    # Sum of squared weights over all three layers.
    squared_norms = 0
    for name in ("W1", "W2", "W3"):
        squared_norms += np.sum(np.square(parameters[name]))
    # Penalty term: (lambda / 2m) * sum of squared weights.
    L2_regularization_cost = 0.5 / m * lambd * squared_norms

    return cross_entropy_cost + L2_regularization_cost
# Smoke test with the provided fixture; expected cost ~= 1.78648594516.
A3, Y_assess, parameters = compute_cost_with_regularization_test_case()

print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))
"""
Explanation: The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting.
2 - L2 Regularization
The standard way to avoid overfitting is called L2 regularization. It consists of appropriately modifying your cost function, from:
$$J = -\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{L}\right) + (1-y^{(i)})\log\left(1- a^{L}\right) \large{)} \tag{1}$$
To:
$$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{L}\right) + (1-y^{(i)})\log\left(1- a^{L}\right) \large{)} }\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_l\sum\limits_k\sum\limits_j W{k,j}^{[l]2} }_\text{L2 regularization cost} \tag{2}$$
Let's modify your cost and observe the consequences.
Exercise: Implement compute_cost_with_regularization() which computes the cost given by formula (2). To calculate $\sum\limits_k\sum\limits_j W_{k,j}^{[l]2}$ , use :
python
np.sum(np.square(Wl))
Note that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \frac{1}{m} \frac{\lambda}{2} $.
End of explanation
"""
# GRADED FUNCTION: backward_propagation_with_regularization

def backward_propagation_with_regularization(X, Y, cache, lambd):
    """
    Implements the backward propagation of our baseline model to which we added an L2 regularization.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation()
    lambd -- regularization hyperparameter, scalar

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    # Output layer (sigmoid + cross-entropy): dZ3 simplifies to A3 - Y.
    dZ3 = A3 - Y
    # Each dW picks up an extra (lambd/m) * W term from the L2 penalty.
    dW3 = 1./m * np.dot(dZ3, A2.T) + lambd/m * W3
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims=True)

    # Hidden layer 2 (ReLU): gradient flows only where activations were positive.
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1./m * np.dot(dZ2, A1.T) + lambd/m * W2
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims=True)

    # Hidden layer 1 (ReLU).
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1./m * np.dot(dZ1, X.T) + lambd/m * W1
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
            "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
# Smoke test: gradients for the fixture should match the expected output below.
X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()

grads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)
print ("dW1 = "+ str(grads["dW1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("dW3 = "+ str(grads["dW3"]))
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**cost**
</td>
<td>
1.78648594516
</td>
</tr>
</table>
Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost.
Exercise: Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\frac{d}{dW} ( \frac{1}{2}\frac{\lambda}{m} W^2) = \frac{\lambda}{m} W$).
End of explanation
"""
# L2-regularized run with lambda = 0.7.
parameters = model(train_X, train_Y, lambd = 0.7)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**dW1**
</td>
<td>
[[-0.25604646 0.12298827 -0.28297129]
[-0.17706303 0.34536094 -0.4410571 ]]
</td>
</tr>
<tr>
<td>
**dW2**
</td>
<td>
[[ 0.79276486 0.85133918]
[-0.0957219 -0.01720463]
[-0.13100772 -0.03750433]]
</td>
</tr>
<tr>
<td>
**dW3**
</td>
<td>
[[-1.77691347 -0.11832879 -0.09397446]]
</td>
</tr>
</table>
Let's now run the model with L2 regularization $(\lambda = 0.7)$. The model() function will call:
- compute_cost_with_regularization instead of compute_cost
- backward_propagation_with_regularization instead of backward_propagation
End of explanation
"""
plt.title("Model with L2-regularization")
axes = plt.gca()
# Same plot window as the unregularized model for direct comparison.
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: Congrats, the test set accuracy increased to 93%. You have saved the French football team!
You are not overfitting the training data anymore. Let's plot the decision boundary.
End of explanation
"""
# GRADED FUNCTION: forward_propagation_with_dropout
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
    """
    Forward pass for LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.

    Inverted dropout is applied to the two hidden layers only: each hidden
    unit is kept with probability keep_prob, and the surviving activations
    are rescaled by 1/keep_prob so the layer's expected output is unchanged.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    parameters -- python dictionary containing "W1", "b1", "W2", "b2", "W3", "b3":
                  W1 (20, 2), b1 (20, 1), W2 (3, 20), b2 (3, 1), W3 (1, 3), b3 (1, 1)
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    A3 -- last activation value, output of the forward propagation, of shape (1,1)
    cache -- tuple of intermediates (including the dropout masks D1, D2)
             consumed by backward_propagation_with_dropout
    """
    # Fixed seed so the dropout masks are reproducible for grading.
    np.random.seed(1)

    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # Hidden layer 1: linear -> relu -> inverted dropout.
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    D1 = np.random.rand(A1.shape[0], A1.shape[1]) < keep_prob  # boolean keep-mask
    A1 = (A1 * D1) / keep_prob  # zero dropped units, rescale the survivors

    # Hidden layer 2: same treatment with an independent mask.
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    D2 = np.random.rand(A2.shape[0], A2.shape[1]) < keep_prob
    A2 = (A2 * D2) / keep_prob

    # Output layer: linear -> sigmoid (no dropout on the output layer).
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
    return A3, cache
# Run the dropout forward pass on a fixed test case (keep_prob = 0.7);
# the printed A3 should match the expected output shown below.
X_assess, parameters = forward_propagation_with_dropout_test_case()
A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)
print ("A3 = " + str(A3))
"""
Explanation: Observations:
- The value of $\lambda$ is a hyperparameter that you can tune using a dev set.
- L2 regularization makes your decision boundary smoother. If $\lambda$ is too large, it is also possible to "oversmooth", resulting in a model with high bias.
What is L2-regularization actually doing?:
L2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes.
<font color='blue'>
What you should remember -- the implications of L2-regularization on:
- The cost computation:
- A regularization term is added to the cost
- The backpropagation function:
- There are extra terms in the gradients with respect to weight matrices
- Weights end up smaller ("weight decay"):
- Weights are pushed to smaller values.
3 - Dropout
Finally, dropout is a widely used regularization technique that is specific to deep learning.
It randomly shuts down some neurons in each iteration. Watch these two videos to see what this means!
<!--
To understand drop-out, consider this conversation with a friend:
- Friend: "Why do you need all these neurons to train your network and classify images?".
- You: "Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more features my model learns!"
- Friend: "I see, but are you sure that your neurons are learning different features and not all the same features?"
- You: "Good point... Neurons in the same layer actually don't talk to each other. It should be definitely possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution."
!-->
<center>
<video width="620" height="440" src="images/dropout1_kiank.mp4" type="video/mp4" controls>
</video>
</center>
<br>
<caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep_prob$ or keep it with probability $keep_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </center></caption>
<center>
<video width="620" height="440" src="images/dropout2_kiank.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption>
When you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time.
3.1 - Forward propagation with dropout
Exercise: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer.
Instructions:
You would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps:
1. In lecture, we discussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using np.random.rand() to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{1} d^{1} ... d^{1}] $ of the same dimension as $A^{[1]}$.
2. Set each entry of $D^{[1]}$ to be 0 with probability (1-keep_prob) or 1 with probability (keep_prob), by thresholding values in $D^{[1]}$ appropriately. Hint: to set all the entries of a matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: X = (X < 0.5). Note that 0 and 1 are respectively equivalent to False and True.
3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values.
4. Divide $A^{[1]}$ by keep_prob. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.)
End of explanation
"""
# GRADED FUNCTION: backward_propagation_with_dropout
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
    """
    Backward pass for the 3-layer network trained with inverted dropout.

    The masks D1/D2 recorded during the forward pass are re-applied to
    dA1/dA2 (units that were shut down receive no gradient), and the
    gradients are rescaled by 1/keep_prob to mirror the forward scaling.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation_with_dropout()
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    gradients -- dictionary of gradients with respect to each parameter,
                 activation and pre-activation ("dW1", "db1", "dA1", "dZ1", ...)
    """
    m = X.shape[1]
    (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache
    inv_m = 1. / m

    # Output layer (sigmoid + cross-entropy): dZ3 = A3 - Y.
    dZ3 = A3 - Y
    dW3 = inv_m * np.dot(dZ3, A2.T)
    db3 = inv_m * np.sum(dZ3, axis=1, keepdims = True)

    # Hidden layer 2: mask with D2, rescale, then relu derivative.
    dA2 = (np.dot(W3.T, dZ3) * D2) / keep_prob
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = inv_m * np.dot(dZ2, A1.T)
    db2 = inv_m * np.sum(dZ2, axis=1, keepdims = True)

    # Hidden layer 1: same treatment with mask D1.
    dA1 = (np.dot(W2.T, dZ2) * D1) / keep_prob
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = inv_m * np.dot(dZ1, X.T)
    db1 = inv_m * np.sum(dZ1, axis=1, keepdims = True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
                 "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients
# Sanity-check the dropout backward pass on a fixed test case
# (keep_prob = 0.8); dA1/dA2 should match the expected output table below.
X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()
gradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)
print ("dA1 = " + str(gradients["dA1"]))
print ("dA2 = " + str(gradients["dA2"]))
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**A3**
</td>
<td>
[[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]
</td>
</tr>
</table>
3.2 - Backward propagation with dropout
Exercise: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache.
Instruction:
Backpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:
1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to A1. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to dA1.
2. During forward propagation, you had divided A1 by keep_prob. In backpropagation, you'll therefore have to divide dA1 by keep_prob again (the calculus interpretation is that if $A^{[1]}$ is scaled by keep_prob, then its derivative $dA^{[1]}$ is also scaled by the same keep_prob).
End of explanation
"""
# Train the 3-layer model with dropout (keep_prob = 0.86, i.e. each hidden
# unit is dropped with probability 0.14) and report train/test accuracy.
parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**dA1**
</td>
<td>
[[ 0.36544439 0. -0.00188233 0. -0.17408748]
[ 0.65515713 0. -0.00337459 0. -0. ]]
</td>
</tr>
<tr>
<td>
**dA2**
</td>
<td>
[[ 0.58180856 0. -0.00299679 0. -0.27715731]
[ 0. 0.53159854 -0. 0.53159854 -0.34089673]
[ 0. 0. -0.00292733 0. -0. ]]
</td>
</tr>
</table>
Let's now run the model with dropout (keep_prob = 0.86). It means at every iteration you shut down each neuron of layers 1 and 2 with 24% probability. The function model() will now call:
- forward_propagation_with_dropout instead of forward_propagation.
- backward_propagation_with_dropout instead of backward_propagation.
End of explanation
"""
# Visualize the decision boundary learned with dropout; axis limits match
# the earlier plots so the three models can be compared visually.
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you!
Run the code below to plot the decision boundary.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/bcc/cmip6/models/bcc-csm2-hr/ocean.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bcc', 'bcc-csm2-hr', 'ocean')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocean
MIP Era: CMIP6
Institute: BCC
Source ID: BCC-CSM2-HR
Topic: Ocean
Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing.
Properties: 133 (101 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:39
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
# Left at 0 (do not publish) until the property values below are filled in.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Seawater Properties
3. Key Properties --> Bathymetry
4. Key Properties --> Nonoceanic Waters
5. Key Properties --> Software Properties
6. Key Properties --> Resolution
7. Key Properties --> Tuning Applied
8. Key Properties --> Conservation
9. Grid
10. Grid --> Discretisation --> Vertical
11. Grid --> Discretisation --> Horizontal
12. Timestepping Framework
13. Timestepping Framework --> Tracers
14. Timestepping Framework --> Baroclinic Dynamics
15. Timestepping Framework --> Barotropic
16. Timestepping Framework --> Vertical Physics
17. Advection
18. Advection --> Momentum
19. Advection --> Lateral Tracers
20. Advection --> Vertical Tracers
21. Lateral Physics
22. Lateral Physics --> Momentum --> Operator
23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
24. Lateral Physics --> Tracers
25. Lateral Physics --> Tracers --> Operator
26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
27. Lateral Physics --> Tracers --> Eddy Induced Velocity
28. Vertical Physics
29. Vertical Physics --> Boundary Layer Mixing --> Details
30. Vertical Physics --> Boundary Layer Mixing --> Tracers
31. Vertical Physics --> Boundary Layer Mixing --> Momentum
32. Vertical Physics --> Interior Mixing --> Details
33. Vertical Physics --> Interior Mixing --> Tracers
34. Vertical Physics --> Interior Mixing --> Momentum
35. Uplow Boundaries --> Free Surface
36. Uplow Boundaries --> Bottom Boundary Layer
37. Boundary Forcing
38. Boundary Forcing --> Momentum --> Bottom Friction
39. Boundary Forcing --> Momentum --> Lateral Friction
40. Boundary Forcing --> Tracers --> Sunlight Penetration
41. Boundary Forcing --> Tracers --> Fresh Water Forcing
1. Key Properties
Ocean key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean model code (NEMO 3.6, MOM 5.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the ocean.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the ocean component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Seawater Properties
Physical properties of seawater in ocean
2.1. Eos Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
"""
Explanation: 2.2. Eos Functional Temp
Is Required: TRUE Type: ENUM Cardinality: 1.1
Temperature used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
"""
Explanation: 2.3. Eos Functional Salt
Is Required: TRUE Type: ENUM Cardinality: 1.1
Salinity used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
"""
Explanation: 2.4. Eos Functional Depth
Is Required: TRUE Type: ENUM Cardinality: 1.1
Depth or pressure used in EOS for sea water ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2.5. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.6. Ocean Specific Heat
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Specific heat in ocean (cpocean) in J/(kg K)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.7. Ocean Reference Density
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Boussinesq reference density (rhozero) in kg / m3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Bathymetry
Properties of bathymetry in ocean
3.1. Reference Dates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Reference date of bathymetry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Type
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the bathymetry fixed in time in the ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Ocean Smoothing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any smoothing or hand editing of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Source
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe source of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Nonoceanic Waters
Non oceanic waters treatement in ocean
4.1. Isolated Seas
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how isolated seas is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. River Mouth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how river mouth mixing or estuaries specific treatment is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Software Properties
Software properties of ocean code
5.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Resolution
Resolution in the ocean grid
6.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.4. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.5. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.6. Is Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.7. Thickness Level 1
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Thickness of first surface ocean level (in meters)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Tuning Applied
Tuning methodology for ocean component
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the ocean component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Brief description of conservation methodology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in the ocean by the numerical schemes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Consistency Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Corrected Conserved Prognostic Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Set of variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.5. Was Flux Correction Used
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does conservation involve flux correction ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Grid
Ocean grid
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of grid in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Properties of vertical discretisation in ocean
10.1. Coordinates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical coordinates in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10.2. Partial Steps
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Using partial steps with Z or Z vertical coordinate in ocean ?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Discretisation --> Horizontal
Type of horizontal discretisation scheme in ocean
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Staggering
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal grid staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Timestepping Framework
Ocean Timestepping Framework
12.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Diurnal Cycle
Is Required: TRUE Type: ENUM Cardinality: 1.1
Diurnal cycle type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Timestepping Framework --> Tracers
Properties of tracers time stepping in ocean
13.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracers time stepping scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Tracers time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Timestepping Framework --> Baroclinic Dynamics
Baroclinic dynamics in ocean
14.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Baroclinic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Timestepping Framework --> Barotropic
Barotropic time stepping in ocean
15.1. Splitting
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time splitting method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.2. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Barotropic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Timestepping Framework --> Vertical Physics
Vertical physics time stepping in ocean
16.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Details of vertical time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Advection
Ocean advection
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of advection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
"""
Explanation: 18. Advection --> Momentum
Properties of lateral momemtum advection scheme in ocean
18.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of lateral momemtum advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Scheme Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean momemtum advection scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 18.3. ALE
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Using ALE for vertical advection ? (if vertical coordinates are sigma)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19. Advection --> Lateral Tracers
Properties of lateral tracer advection scheme in ocean
19.1. Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Order of lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 19.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for lateral tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Effective Order
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Effective order of limited lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.4. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.5. Passive Tracers
Is Required: FALSE Type: ENUM Cardinality: 0.N
Passive tracers advected
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.6. Passive Tracers Advection
Is Required: FALSE Type: STRING Cardinality: 0.1
Is advection of passive tracers different than active ? if so, describe.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20. Advection --> Vertical Tracers
Properties of vertical tracer advection scheme in ocean
20.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 20.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for vertical tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Lateral Physics
Ocean lateral physics
21.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lateral physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
"""
Explanation: 21.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transient eddy representation in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Lateral Physics --> Momentum --> Operator
Properties of lateral physics operator for momentum in ocean
22.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics momemtum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics momemtum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics momemtum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Properties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean
23.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics momemtum eddy viscosity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 23.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Coeff Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24. Lateral Physics --> Tracers
Properties of lateral physics for tracers in ocean
24.1. Mesoscale Closure
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a mesoscale closure in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24.2. Submesoscale Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Lateral Physics --> Tracers --> Operator
Properties of lateral physics operator for tracers in ocean
25.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean
26.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics tracers eddy diffusity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.4. Coeff Background
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 26.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean
27.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EIV in lateral physics tracers in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27.2. Constant Val
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If EIV scheme for tracers is constant, specify coefficient value (M2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Flux Type
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV flux (advective or skew)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Added Diffusivity
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV added diffusivity (constant, flow dependent or none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28. Vertical Physics
Ocean Vertical Physics
28.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vertical physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Properties of vertical physics in ocean
29.1. Langmuir Cells Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there Langmuir cells mixing in upper ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
*Properties of boundary layer (BL) mixing on tracers in the ocean *
30.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of tracers, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of tracers coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
*Properties of boundary layer (BL) mixing on momentum in the ocean *
31.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of momentum, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of momentum coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32. Vertical Physics --> Interior Mixing --> Details
*Properties of interior mixing in the ocean *
32.1. Convection Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical convection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.2. Tide Induced Mixing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how tide induced mixing is modelled (barotropic, baroclinic, none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.3. Double Diffusion
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there double diffusion
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.4. Shear Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there interior shear mixing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33. Vertical Physics --> Interior Mixing --> Tracers
*Properties of interior mixing on tracers in the ocean *
33.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 33.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of tracers, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of tracers coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34. Vertical Physics --> Interior Mixing --> Momentum
*Properties of interior mixing on momentum in the ocean *
34.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 34.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of momentum, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of momentum coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Uplow Boundaries --> Free Surface
Properties of free surface in ocean
35.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of free surface in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Free surface scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 35.3. Embeded Seaice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the sea-ice embeded in the ocean model (instead of levitating) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Uplow Boundaries --> Bottom Boundary Layer
Properties of bottom boundary layer in ocean
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.2. Type Of Bbl
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 36.3. Lateral Mixing Coef
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.4. Sill Overflow
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any specific treatment of sill overflows
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37. Boundary Forcing
Ocean boundary forcing
37.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of boundary forcing in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Surface Pressure
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.3. Momentum Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.4. Tracers Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.5. Wave Effects
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how wave effects are modelled at ocean surface.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.6. River Runoff Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how river runoff from land surface is routed to ocean and any global adjustment done.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.7. Geothermal Heating
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how geothermal heating is present at ocean bottom.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 38. Boundary Forcing --> Momentum --> Bottom Friction
Properties of momentum bottom friction in ocean
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum bottom friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 39. Boundary Forcing --> Momentum --> Lateral Friction
Properties of momentum lateral friction in ocean
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum lateral friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Properties of sunlight penetration scheme in ocean
40.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of sunlight penetration scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 40.2. Ocean Colour
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the ocean sunlight penetration scheme ocean colour dependent ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40.3. Extinction Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe and list extinctions depths for sunlight penetration scheme (if applicable).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Properties of surface fresh water forcing in ocean
41.1. From Atmopshere
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from atmos in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. From Sea Ice
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from sea-ice in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 41.3. Forced Mode Restoring
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of surface salinity restoring in forced mode (OMIP)
End of explanation
"""
|
nick-youngblut/SIPSim | ipynb/bac_genome/n1210/qSIP/qSIP_dev.ipynb | mit | supInfoFile = '/home/nick/notebook/SIPSim/dev/qSIP/PeerJ_qSIP_preprint/PeerJ_Supplemental_Information.pdf'
"""
Explanation: Developing a simulation methodology for the qSIP method
Method:
qPCR simulation
mean quantifications derived from the absolute count data
variance derived from qSIP paper
values will be multiplied by taxon relative abundances (post-simulation subsampling) to get taxon-specific weighted averaged density differences (y_ijk)
Incorporation calculation
$\text{Buoyant density} = y_{ijk} = x_{jk}$
$\text{Total copies} = y_{ij} = \sum(y_{ijk})$
$\text{taxon density} = W_{ij} = \sum_{k=1}^{K} x_{jk} * \big(\frac{y_{ijk}}{y_{ij}}\big)$
$\text{density diff due to isotope incorp} = Z_i = W_{LABi} - W_{LIGHTi}$
each is mean across all replicates
need to simulate 3 replicates for control & treatment
Bootstrapping
sampling with replication of taxon abundances
sampling from all replicates in a treatment
estimated 90% CI bootstraps
value range encompassing 90% of bootstrap values
Setting variables
End of explanation
"""
import os,sys
%load_ext rpy2.ipython
%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(gridExtra)
"""
Explanation: Init
End of explanation
"""
pdfTextFile= os.path.splitext(supInfoFile)[0] + '.txt'
!pdf2txt.py $supInfoFile | perl -pe 's/\r+/ /g; s/[ \t ]+/\t/g' > $pdfTextFile
!head -n 4 $pdfTextFile
!printf '===============\n'
!tail -n 4 $pdfTextFile
!egrep "Table.+S2" $pdfTextFile
"""
Explanation: Deriving qPCR variance from empirical data: qSIP pre-print
pdf2txt method
End of explanation
"""
tableS2File = pdfTextFile= os.path.splitext(supInfoFile)[0] + '_e.txt'
!head -n4 $tableS2File
"""
Explanation: Making a parser was too painful; I just broke down and used a text editor & excel to parse out the table
End of explanation
"""
%%R -i tableS2File -w 900
tbl.s2 = read.delim(tableS2File, sep='\t') %>%
mutate(Tube = as.character(Tube))
ggplot(tbl.s2, aes(density.g.p.mL, X16S.qPCR.copynum.p.ul, color=Tube)) +
geom_point() +
geom_line() +
theme_bw() +
facet_grid(Glucose ~ Water) +
theme(
text = element_text(size=16)
)
%%R -w 600 -h 300
ggplot(tbl.s2, aes(density.g.p.mL, X16S.qPCR.copynum.p.ul)) +
geom_smooth() +
theme_bw() +
theme(
text = element_text(size=16)
)
"""
Explanation: Plotting data
End of explanation
"""
%%R -w 600 -h 300
tbl.s2.s = tbl.s2 %>%
group_by(ntile(density.g.p.mL, 20)) %>%
summarize(min_density = min(density.g.p.mL),
mean_density = mean(density.g.p.mL),
max_density = max(density.g.p.mL),
density_width = max_density - min_density,
var_copy = var(X16S.qPCR.copynum.p.ul),
sd_copy = sd(X16S.qPCR.copynum.p.ul),
mean_copy = mean(X16S.qPCR.copynum.p.ul))
ggplot(tbl.s2.s, aes(mean_density, sd_copy, color='red')) +
geom_bar(stat='identity', aes(width=density_width)) +
theme_bw() +
theme(
text = element_text(size=16)
)
%%R -w 600 -h 500
# how does variance relate to mean copy number
p1 = ggplot(tbl.s2.s, aes(mean_copy, var_copy)) +
geom_point() +
theme_bw() +
theme(
text = element_text(size=16)
)
p2 = ggplot(tbl.s2.s, aes(mean_copy, sd_copy)) +
geom_point() +
geom_smooth(method='lm') +
theme_bw() +
theme(
text = element_text(size=16)
)
grid.arrange(p1, p2, ncol=1)
"""
Explanation: Determining variance across a density window
End of explanation
"""
%%R
res = lm(sd_copy ~ mean_copy, tbl.s2.s)
summary(res)
"""
Explanation: Linear regression of mean_copy_number ~ sd
End of explanation
"""
def subInfo_parser(iFH):
    # parsing tableS2 from qSIP paper
    # Scans the supplemental-info text line by line; once a line matching
    # "Table ... S2" is found, hands the remaining lines to tableS2_parser.
    # NOTE(review): depends on the `re` module being imported at file level —
    # no `import re` is visible in this chunk; confirm it exists upstream.
    for line in iFH:
        if re.match('.+Table.+S2', line):
            tableS2_parser(iFH)

def tableS2_parser(iFH):
    # Intended to accumulate Table S2 rows into a dict keyed by column.
    # header = ('#SampleID', 'Tube', 'glucose', 'p-', '16S-')
    # is_header = lambda x: [x.startswith(y) for y in header]
    # NOTE(review): `is_header` only exists in the commented-out line above,
    # and `parse` below is a bare, undefined name — as written this function
    # raises NameError when called. The notebook marks this section "OLD";
    # it appears to be an abandoned draft kept for reference only.
    tableS2 = {}
    for line in iFH:
        if any(is_header(line)):
            parse

def parse_column(iFH):
    # NOTE(review): indentation was lost in extraction; the `with` block below
    # is assumed to be this function's body (an empty body would be a syntax
    # error) — confirm against the original notebook. Also note the file is
    # opened in 'rb' (bytes lines) while the parsers above do str matching;
    # mixing the two would fail at the re.match/startswith calls.
    with open(pdfTextFile, 'rb') as iFH:
        tableS2_parser(iFH)
"""
Explanation: Notes:
standard deviation varied more or less linearly with mean
Implementation in modeling
The qPCR values will be drawn from normal distributions representing posible value distribution
normal dist mean (loc) = community total count value --> 'qPCR mean copy number'
normal dist sd (scale) = community total count value --> 'qPCR mean copy number' --> sd copy number
Bootstrapping
How does the bootstrapping methodology work in the paper?
Possibilities:
OLD
End of explanation
"""
|
pysg/pyther | Modelo de impregnacion/modelo1/Actividad 8 Simulación de impregnación de LDPE.ipynb | mit | import numpy as np
import pandas as pd
import math
import cmath
from scipy.optimize import root
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Simulación de impregnación de LDPE
Introduction
Ce programme nous permet de modéliser la concentration (c2) pour différents food simulant. Cela nous permet également de tracer différents graphiques.
End of explanation
"""
a = ("Table1.txt")
a
"""
Explanation: Polymère
End of explanation
"""
class InterfazPolimero:
    """Model of final antioxidant concentration (c2) in an LDPE film.

    Reads a space-separated experiment table (e.g. "Table1.txt") and exposes
    per-experiment columns as attributes, a flux-balance calculation, and
    several diagnostic scatter plots.

    Relies on module-level imports: pandas as pd, numpy as np,
    scipy.optimize.root, and matplotlib.pyplot as plt.
    """
    def __init__ (self,a):
        # a: path to the space-separated data file.
        self.a=a
    def Lire(self):
        """Load the table, expose each column as an attribute, and return the
        DataFrame.

        Column order is assumed fixed: Experiment, Thickness, FoodSimulant,
        Cpo, K, Dp, RMSE, k, c4 (a tenth column c1 is commented out).
        """
        self.tab = pd.read_csv(self.a,sep=" ")
        coef =self.tab.values
        self.Experiment = coef[:,0]
        self.Thickness = coef[:,1]
        self.FoodSimulant = coef[:,2]
        self.Cpo = coef[:,3]
        self.K = coef [:,4]
        self.Dp = coef[:,5]
        self.RMSE = coef[:,6]
        self.k = coef[:,7]
        self.c4 = coef[:,8]
        # self.c1 =coef[:,9]
        # c2 starts at zero for all 10 experiments; assumes exactly 10 rows
        # in the input table — TODO confirm.
        self.c2 = np.zeros(10)
        return self.tab
    def inicializarC2(self):
        """Reset c2 to a zero vector of length 10 and return it."""
        self.c2 = np.zeros(10)
        self.dimension = np.shape(self.c2)
        print(self.dimension)
        return self.c2
    def calcul(self):
        """Return the relative mismatch (j1 - j2) / j1 between the two fluxes.

        j1: flux computed from Dp over half the film thickness times
        (Cpo - c2); j2: k * (c3 - c4) where c3 = c2 / K. Presumably j1 is the
        diffusive flux inside the polymer and j2 the convective flux into the
        food simulant — units and physical interpretation not verifiable from
        this chunk; confirm against the model write-up.
        """
        self.tab["j1"] = (self.tab["Dp"] / (self.tab["Thickness"] / 2)) * (self.tab["Cpo"] - self.c2)
        print(self.tab["j1"])
        self.c3 = self.c2 / self.K
        self.j2 = self.k * (self.c3 - self.tab["c4"])
        return (self.tab["j1"] - self.j2) / self.tab["j1"]
    def calcul2(self):
        # NOTE(review): broken as written. The `for` target unpacks into
        # DataFrame column selections from `enumerate(tab)`, and `tab`,
        # `calcul` and `c2` are unqualified names (should presumably be
        # self.tab, a module-level residual function, and self.c2) — calling
        # this raises NameError. It looks like an unfinished attempt at a
        # per-row root solve of the flux balance; the hard-coded result lists
        # in Garder/Graphique2-5 appear to be output of an earlier working run.
        i = 0
        for self.tab["Thickness"], self.tab["Dp"], self.tab["K"], self.tab["k"], self.tab["c"] in enumerate(tab):
            self.sol = root(calcul,15,args=(float(self.tab["Dp"]),float(self.tab["k"]),float(self.tab["K"]),float(self.tab["c4"]),float(self.tab["Cpo"]),float(self.tab["Thickness"])))
            c2[i]= self.sol.x
            i = i + 1
        print(self.c2)
        return self.c2
    def Garder(self):
        """Save the (hard-coded) c2 results to a CSV file named "c2rep" and
        return the DataFrame.

        NOTE(review): the result list is duplicated verbatim in
        Graphique2-Graphique5; it should ideally live in one place.
        """
        raw_data ={"résultat" : [1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793]}
        df = pd.DataFrame(raw_data,index=["1","2","3","4","5","6","7","8","9","10"])
        df.to_csv("c2rep")
        return df
    def Graphique(self):
        # Scatter plot: initial concentration Cpo versus diffusion coef Dp.
        plt.plot(self.tab["Dp"],self.Cpo,"^")
        plt.title("f(Dp)=Cpo")
        plt.xlabel("Dp")
        plt.ylabel("Cpo")
    def Graphique2(self):
        # Scatter plot: hard-coded c2 results versus Dp.
        plt.plot(self.tab["Dp"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        plt.title("f(Dp)=c2")
        plt.xlabel("Dp")
        plt.ylabel("c2")
    def Graphique3(self):
        # Scatter plot: hard-coded c2 results versus Cpo.
        plt.plot(self.tab["Cpo"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        plt.title("f(Cpo)=c2")
        plt.xlabel("Cpo")
        plt.ylabel("c2")
    def Graphique4(self):
        # Scatter plot: hard-coded c2 results versus film thickness.
        plt.plot(self.tab["Thickness"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        plt.title("f(Epaisseur)=c2")
        plt.xlabel("Epaisseur")
        plt.ylabel("c2")
    def Graphique5(self):
        # 2x2 panel combining the four scatter plots above (no titles/labels).
        fig,axes=plt.subplots(2,2)
        axes[0,0].plot(self.tab["Dp"],self.Cpo,"^")
        axes[1,1].plot(self.tab["Dp"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        axes[0,1].plot(self.tab["Cpo"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
        axes[1,0].plot(self.tab["Thickness"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
p = InterfazPolimero("Table1.txt")
p
"""
Explanation: Calcul de la concentration finale
Nous avons besoin de différentes valeurs de concentration qui sont les suivantes :
Afin de calculer la concentration finale, nous avons besoin d'équations qui sont les suivantes :
End of explanation
"""
p.Lire()
"""
Explanation: Table des valeurs
Ici, nous pouvons voir les valeurs obtenus pour chaque expériences. Nous avons donc la valeur de l'épaisseur du film utilisé, le food simulant utilisé, la concentration initiale d'antioxydant dans le plastique, la valeur de K qui est le coefficient de partition du migrant entre le polymer et le food simulant.Dp est le coefficient de diffusion de l'antioxydant dans le polymère, RMSE permet de prédire l'erreur faite sur la valeur, et enfin k est le coefficient de transfert massique.
Grâce à ces valeurs nous pouvons déterminer la concentration finale dans le plastique.
End of explanation
"""
p.calcul()
"""
Explanation: Calcul de c2
Ce calcul nous permet donc d'obtenir les valeurs de la concentration finale dans le plastique et donc de déterminer l'efficacité du procédé.
End of explanation
"""
p.Graphique()
"""
Explanation: Graphique : f(Dp) = Cpo
End of explanation
"""
p.Graphique2()
"""
Explanation: Graphique : f(Dp) = c2
End of explanation
"""
p.Graphique3()
"""
Explanation: Graphique : f(Cpo) = c2
End of explanation
"""
p.Graphique4()
p.Graphique5()
"""
Explanation: Graphique : f(Epaisseur) = c2
End of explanation
"""
|
NYUDataBootcamp/Projects | MBA_S17/Sasidharan.ipynb | mit | import pandas as pd # Importing necessary data package
import matplotlib.pyplot as plt # pyplot module
import numpy as np
"""
Explanation: Python Real Estate Analysis Project
May 2017
Written by Divya Sasidharan at NYU Stern
Contact: ds5151@nyu.edu
Overview
Real estate is an active area in both the investment market and academic research. Foreclosure auction properties are special properties that have different transaction and payment procedures from regular sales, and their home buyers take much higher risk. In this project, we built a scalable analytics workflow to extract, clean and analyze foreclosure property data from multiple sources using Python tools and libraries. We analyzed the correlation of foreclosure property price with crime score, walk score and school rating. We found that crime and school quality are major factors influencing property price and demand.
End of explanation
"""
Zillow = pd.ExcelFile("Properties_philly_Kraggle_v2.xlsx")
zz = Zillow.parse('Properties_philly_Kraggle_v2')
zz
print('Dimensions: ', zz.shape) # looking at the categories I can work with
print('Column labels: ', zz.columns) #Listing out the Column headings
print('Row labels: ', zz.index)
z = zz.dropna() #Dropped empty rows that does have any data
print('Dimensions: ', z.shape)
"""
Explanation: Data Soure
Downloaded the Philadelphia real estate dataset from Kaggle and import using Pandas lib. Please refer the following link.
https://www.kaggle.com/harry007/philly-real-estate-data-set-sample/downloads/philly-real-estate-data-set-sample.zip
End of explanation
"""
plt.scatter(z['Postal Code'], z[' Violent Crime Rate '])
plt.show()
plt.scatter(z['Postal Code'], z[' Avg Walk&Transit score '])
plt.show()
plt.scatter(z['Postal Code'], z[' School Score '])
plt.show()
"""
Explanation: Crime rate, Walks and School score in each postal code
Following are the observations based on analyzing average house price by crime rate, walk score and school score against postal code. The factor that affected the price most was crime for the city of Philadelphia. School rating also influenced the price of the property. Walk score did not have a strong impact; it was overridden by the crime score, but it had a combined effect at the city center.
End of explanation
"""
plt.scatter(z['Postal Code'], z['Sale Price/bid price'])
plt.show()
z1 = pd.DataFrame(z, columns = ['Address',
'Zillow Address',
'Sale Date',
'Opening Bid',
'Sale Price/bid price',
'Book/Writ',
'OPA',
'Postal Code',
'Attorney',
'Ward',
'Seller',
'Buyer',
'Sheriff Cost',
'Advertising',
'Other',
'Record Deed',
'Water',
'PGW',
' Avg Walk&Transit score ',
' Violent Crime Rate ',
' School Score ',
'Zillow Estimate',
'Rent Estimate',
'taxAssessment',
'yearBuilt',
'finished \n(SqFt)',
' bathrooms ',
' bedrooms ',
'PropType',
'Average comps'])
"""
Explanation: House sales price by zipcode
Below analyis shows that most of the house price are below $150,000 in the city of Philadelphia.
End of explanation
"""
z1['Avg. Price'] = z1[['Zillow Estimate', 'taxAssessment', 'Average comps']].mean(axis=1)
list(z1)
z1['Avg. Price'].head()
"""
Explanation: Calculating Average price of the property
Average price of the property = Zillow Estimate + Tax asssesment value + Average comps of similar property
End of explanation
"""
z1['Avg. price per sq. ft'] = z1['Avg. Price']/z1['finished \n(SqFt)']
z1['Avg. price per sq. ft'].head()
"""
Explanation: Average Price per sq. ft
Average price per sq.ft = Average price/finished square foot.
End of explanation
"""
z1[' Violent Crime Rate '].median()
z1[' Violent Crime Rate '].min()
z1[' Violent Crime Rate '].max()
crimerank = []
for row in z1[' Violent Crime Rate ']:
if row<0.344:
crimerank.append(1)
elif row>=0.344 and row<0.688:
crimerank.append(2)
elif row>=0.688 and row<1.032:
crimerank.append(3)
elif row>=1.032 and row<1.376:
crimerank.append(4)
else:
crimerank.append(5)
z1['Crime Rank'] = crimerank
z1['Crime Rank'].head()
zcrime = z1.groupby(['Crime Rank'])['Avg. Price'].mean()
zcrime
"""
Explanation: Calculating rank based on Crime rate
Crime rate has been biffercated into various buckets based on the min, median and max. Please refer the below code for the buckets allocated.
End of explanation
"""
plt.figure(figsize = (16,7)) # plotting our data
plt.plot(zcrime, color = 'red', marker= '*')
plt.suptitle('Average price by crime', fontsize=18)
plt.xlabel('Crime Rank', fontsize=12)
plt.ylabel('Average price of houses', fontsize=12)
plt.show()
"""
Explanation: Impact of Crime over sales price
There are several factors that influences the house value and the most important is the crime for the city of Philadelphia. The same factor may not be the most influential factor in other cities for example, Manhattan, NY. The following chart shows that higher the rank (lower crime) higher the price of the property.
End of explanation
"""
Walkrank = []
for row in z1[' Avg Walk&Transit score ']:
if row>88:
Walkrank.append(1)
elif row>77 and row<=88:
Walkrank.append(2)
elif row>66 and row<=77:
Walkrank.append(3)
elif row>55 and row<=66:
Walkrank.append(4)
else:
Walkrank.append(5)
z1['Walk Rank'] = Walkrank
z1['Walk Rank'].head()
zwalk = z1.groupby(['Walk Rank'])['Avg. Price'].mean()
zwalk
"""
Explanation: Calculating rank based on Walk score
Walkscore has been biffercated into various buckets based on the min, median and max. Please refer the below code for the buckets allocated.
End of explanation
"""
plt.figure(figsize = (16,7)) # plotting our data
plt.plot(zwalk, color = 'blue', marker= '*')
plt.suptitle('Average price by walkrank', fontsize=18)
plt.xlabel('Walk Rank', fontsize=12)
plt.ylabel('Average price of houses', fontsize=12)
plt.show()
"""
Explanation: Impact of Walkscore over sales price
Philadelphia has the 6th best walkscore after New York, Jersey City, San Francisco, Boston and Newark. Analysis carried out by John I. Gilderbloom, William W. Riggs and Wesley L. Meares shows that walkability has a positive impact not only on neighborhood housing valuation but also on reduction of crime and foreclosure[8]. Our research analysis of Philadelphia did not find strong correlation between Walkscore and property price except in the city center.
End of explanation
"""
Schoolrank = []
for row in z1[' School Score ']:
if row>57.308:
Schoolrank.append(1)
elif row>43.816 and row<=57.308:
Schoolrank.append(2)
elif row>30.324 and row<=43.816:
Schoolrank.append(3)
elif row>16.832 and row<=30.324:
Schoolrank.append(4)
else:
Schoolrank.append(5)
z1['School Rank'] = Schoolrank
zschool = z1.groupby(['School Rank'])['Avg. Price'].mean()
zschool
"""
Explanation: Calculating rank based on School rating
School ratings has been biffercated into various buckets based on the min, median and max. Please refer the below code for the buckets allocated.
End of explanation
"""
plt.figure(figsize = (16,7)) # plotting our data
plt.plot(zschool, color = 'blue', marker= '*')
plt.suptitle('Average price by schoolrank', fontsize=18)
plt.xlabel('School Rank', fontsize=12)
plt.ylabel('Average price of houses', fontsize=12)
plt.show()
z1.head()
"""
Explanation: Impact of School score over sales price
School rating also influenced the price of the property. A better school score also improves demand for the property.
End of explanation
"""
z1['Closing Cost'] = z1['Avg. Price']*.085
z1['Rehab Cost'] = z1['finished \n(SqFt)']*25
z1['Estimated Max Bid Price']= z1['Avg. Price']-z1['Rehab Cost']
z1.head()
"""
Explanation: z1['Avg. price per sq. ft'] = z1['Avg. Price']/z1['finished \n(SqFt)']
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.12.2/examples/notebooks/generated/contrasts.ipynb | bsd-3-clause | import numpy as np
import statsmodels.api as sm
"""
Explanation: Contrasts Overview
End of explanation
"""
import pandas as pd
url = 'https://stats.idre.ucla.edu/stat/data/hsb2.csv'
hsb2 = pd.read_table(url, delimiter=",")
hsb2.head(10)
"""
Explanation: This document is based heavily on this excellent resource from UCLA http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm
A categorical variable of K categories, or levels, usually enters a regression as a sequence of K-1 dummy variables. This amounts to a linear hypothesis on the level means. That is, each test statistic for these variables amounts to testing whether the mean for that level is statistically significantly different from the mean of the base category. This dummy coding is called Treatment coding in R parlance, and we will follow this convention. There are, however, different coding methods that amount to different sets of linear hypotheses.
In fact, the dummy coding is not technically a contrast coding. This is because the dummy variables add to one and are not functionally independent of the model's intercept. On the other hand, a set of contrasts for a categorical variable with k levels is a set of k-1 functionally independent linear combinations of the factor level means that are also independent of the sum of the dummy variables. The dummy coding is not wrong per se. It captures all of the coefficients, but it complicates matters when the model assumes independence of the coefficients such as in ANOVA. Linear regression models do not assume independence of the coefficients and thus dummy coding is often the only coding that is taught in this context.
To have a look at the contrast matrices in Patsy, we will use data from UCLA ATS. First let's load the data.
Example Data
End of explanation
"""
hsb2.groupby('race')['write'].mean()
"""
Explanation: It will be instructive to look at the mean of the dependent variable, write, for each level of race ((1 = Hispanic, 2 = Asian, 3 = African American and 4 = Caucasian)).
End of explanation
"""
from patsy.contrasts import Treatment
levels = [1,2,3,4]
contrast = Treatment(reference=0).code_without_intercept(levels)
print(contrast.matrix)
"""
Explanation: Treatment (Dummy) Coding
Dummy coding is likely the most well known coding scheme. It compares each level of the categorical variable to a base reference level. The base reference level is the value of the intercept. It is the default contrast in Patsy for unordered categorical factors. The Treatment contrast matrix for race would be
End of explanation
"""
hsb2.race.head(10)
print(contrast.matrix[hsb2.race-1, :][:20])
pd.get_dummies(hsb2.race.values, drop_first=False)
"""
Explanation: Here we used reference=0, which implies that the first level, Hispanic, is the reference category against which the other level effects are measured. As mentioned above, the columns do not sum to zero and are thus not independent of the intercept. To be explicit, let's look at how this would encode the race variable.
End of explanation
"""
from statsmodels.formula.api import ols
mod = ols("write ~ C(race, Treatment)", data=hsb2)
res = mod.fit()
print(res.summary())
"""
Explanation: This is a bit of a trick, as the race category conveniently maps to zero-based indices. If it does not, this conversion happens under the hood, so this will not work in general but nonetheless is a useful exercise to fix ideas. The below illustrates the output using the three contrasts above
End of explanation
"""
from patsy.contrasts import ContrastMatrix
def _name_levels(prefix, levels):
return ["[%s%s]" % (prefix, level) for level in levels]
class Simple(object):
    """Simple contrast coding for a k-level factor.

    Each of the k-1 contrast columns compares one level against the
    reference (first) level; with an intercept, the intercept is the grand
    mean of the level means.
    """

    def _simple_contrast(self, levels):
        # k x (k-1) matrix: every entry is -1/k except a (k-1)/k on the
        # sub-diagonal starting at row 1.
        k = len(levels)
        mat = np.full((k, k - 1), -1. / k)
        rows = np.arange(1, k)
        cols = np.arange(k - 1)
        mat[rows, cols] = (k - 1.) / k
        return mat

    def code_with_intercept(self, levels):
        intercept_col = np.ones(len(levels))
        full = np.column_stack((intercept_col, self._simple_contrast(levels)))
        return ContrastMatrix(full, _name_levels("Simp.", levels))

    def code_without_intercept(self, levels):
        mat = self._simple_contrast(levels)
        return ContrastMatrix(mat, _name_levels("Simp.", levels[:-1]))
hsb2.groupby('race')['write'].mean().mean()
contrast = Simple().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Simple)", data=hsb2)
res = mod.fit()
print(res.summary())
"""
Explanation: We explicitly gave the contrast for race; however, since Treatment is the default, we could have omitted this.
Simple Coding
Like Treatment Coding, Simple Coding compares each level to a fixed reference level. However, with simple coding, the intercept is the grand mean of all the levels of the factors. Patsy does not have the Simple contrast included, but you can easily define your own contrasts. To do so, write a class that contains a code_with_intercept and a code_without_intercept method that returns a patsy.contrast.ContrastMatrix instance
End of explanation
"""
from patsy.contrasts import Sum
contrast = Sum().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Sum)", data=hsb2)
res = mod.fit()
print(res.summary())
"""
Explanation: Sum (Deviation) Coding
Sum coding compares the mean of the dependent variable for a given level to the overall mean of the dependent variable over all the levels. That is, it uses contrasts between each of the first k-1 levels and level k In this example, level 1 is compared to all the others, level 2 to all the others, and level 3 to all the others.
End of explanation
"""
hsb2.groupby('race')['write'].mean().mean()
"""
Explanation: This corresponds to a parameterization that forces all the coefficients to sum to zero. Notice that the intercept here is the grand mean where the grand mean is the mean of means of the dependent variable by each level.
End of explanation
"""
from patsy.contrasts import Diff
contrast = Diff().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Diff)", data=hsb2)
res = mod.fit()
print(res.summary())
"""
Explanation: Backward Difference Coding
In backward difference coding, the mean of the dependent variable for a level is compared with the mean of the dependent variable for the prior level. This type of coding may be useful for a nominal or an ordinal variable.
End of explanation
"""
res.params["C(race, Diff)[D.1]"]
hsb2.groupby('race').mean()["write"][2] - \
hsb2.groupby('race').mean()["write"][1]
"""
Explanation: For example, here the coefficient on level 1 is the mean of write at level 2 compared with the mean at level 1. Ie.,
End of explanation
"""
# (Reverse) Helmert coding: each level is contrasted with the mean of all
# previous levels.
from patsy.contrasts import Helmert
contrast = Helmert().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Helmert)", data=hsb2)
res = mod.fit()
print(res.summary())
"""
Explanation: Helmert Coding
Our version of Helmert coding is sometimes referred to as Reverse Helmert Coding. The mean of the dependent variable for a level is compared to the mean of the dependent variable over all previous levels. Hence, the name 'reverse' being sometimes applied to differentiate from forward Helmert coding. This comparison does not make much sense for a nominal variable such as race, but we would use the Helmert contrast like so:
End of explanation
"""
# Raw Helmert comparison for level 4: its mean minus the mean of the
# three preceding level means.
grouped = hsb2.groupby('race')
grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean()
"""
Explanation: To illustrate, the comparison on level 4 is the mean of the dependent variable at the previous three levels taken from the mean at level 4
End of explanation
"""
# The fitted coefficients equal the raw comparisons scaled by 1/k; the
# hypothesis tests are unaffected by this constant.
k = 4
1./k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k-1].mean())
k = 3
1./k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k-1].mean())
"""
Explanation: As you can see, these are only equal up to a constant. Other versions of the Helmert contrast give the actual difference in means. Regardless, the hypothesis tests are the same.
End of explanation
"""
# Orthogonal polynomial coding requires an ordered, equally spaced factor,
# so bin `read` into 3 categories first; the contrast then captures the
# linear and quadratic trends across bins.
hsb2['readcat'] = np.asarray(pd.cut(hsb2.read, bins=3))
hsb2.groupby('readcat').mean()['write']
from patsy.contrasts import Poly
levels = hsb2.readcat.unique().tolist()
contrast = Poly().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(readcat, Poly)", data=hsb2)
res = mod.fit()
print(res.summary())
"""
Explanation: Orthogonal Polynomial Coding
The coefficients taken on by polynomial coding for k=4 levels are the linear, quadratic, and cubic trends in the categorical variable. The categorical variable here is assumed to be represented by an underlying, equally spaced numeric variable. Therefore, this type of encoding is used only for ordered categorical variables with equal spacing. In general, the polynomial contrast produces polynomials of order k-1. Since race is not an ordered factor variable let's use read as an example. First we need to create an ordered categorical from read.
End of explanation
"""
|
bjshaw/phys202-2015-work | assignments/assignment08/InterpolationEx01.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.interpolate import interp1d
"""
Explanation: Interpolation Exercise 1
End of explanation
"""
# np.load accepts a path directly; the original opened an unused (and
# text-mode) file handle alongside it. Using the NpzFile as a context
# manager guarantees the archive is closed after the arrays are read.
with np.load('trajectory.npz') as r:
    t = r['t']
    y = r['y']
    x = r['x']
assert isinstance(x, np.ndarray) and len(x)==40
assert isinstance(y, np.ndarray) and len(y)==40
assert isinstance(t, np.ndarray) and len(t)==40
"""
Explanation: 2D trajectory interpolation
The file trajectory.npz contains 3 Numpy arrays that describe a 2d trajectory of a particle as a function of time:
t which has discrete values of time t[i].
x which has values of the x position at those times: x[i] = x(t[i]).
y which has values of the y position at those times: y[i] = y(t[i]).
Load those arrays into this notebook and save them as variables x, y and t:
End of explanation
"""
# Cubic-spline interpolants of x(t) and y(t), resampled on a 200-point
# uniform grid spanning the full original time range.
xt = interp1d(t, x, kind='cubic')
yt = interp1d(t, y, kind='cubic')
newt = np.linspace(np.min(t),np.max(t),200)
newx = xt(newt)
newy = yt(newt)
assert newt[0]==t.min()
assert newt[-1]==t.max()
assert len(newt)==200
assert len(newx)==200
assert len(newy)==200
"""
Explanation: Use these arrays to create interpolated functions $x(t)$ and $y(t)$. Then use those functions to create the following arrays:
newt which has 200 points between ${t_{min},t_{max}}$.
newx which has the interpolated values of $x(t)$ at those times.
newy which has the interpolated values of $y(t)$ at those times.
End of explanation
"""
# Parametric plot of {x(t), y(t)}: interpolated curve as a line of dots,
# original samples as unconnected circles.
f = plt.figure(figsize=(7,6))
plt.plot(newx,newy, marker='.',label='interpolated')
# Bug fix: the original samples are the (x, y) pairs -- the previous code
# plotted x against t, which placed the points on the wrong curve.
plt.plot(x,y, marker='o',linestyle='',label='original')
plt.ylabel('y(t)')
plt.xlabel('x(t)')
plt.title('Parametric Plot')
plt.legend();
assert True # leave this to grade the trajectory plot
"""
Explanation: Make a parametric plot of ${x(t),y(t)}$ that shows the interpolated values and the original points:
For the interpolated points, use a solid line.
For the original points, use circles of a different color and no line.
Customize you plot to make it effective and beautiful.
End of explanation
"""
|
Caranarq/01_Dmine | Datasets/LEED/LEED.ipynb | gpl-3.0 | # Librerias utilizadas
import pandas as pd
import sys
import os
import csv
from lxml import html
import requests
import time
# Print environment details for reproducibility of this notebook run
print('Python {} on {}'.format(sys.version, sys.platform))
print('Pandas version: {}'.format(pd.__version__))
import platform; print('Running on {} {}'.format(platform.system(), platform.release()))
"""
Explanation: Limpieza de datos sobre edificios con certificacion LEED
1. Introduccion
El United States Green Building Council (USGBC) tiene una base de datos de edificios que cuentan con certificación LEED alrededor del mundo. La pagina web de USGBC cuenta con una interfaz para hacer consultas directamente a su base de datos, sin embargo no cuenta con una API o una URL directa para descarga masiva por lo que es necesario enviar el query a la base de datos desde la URL de USGBC:
https://www.usgbc.org/projects/list?page=17&keys=Mexico
Después de esperar a que la base de datos interprete el query, regresa el archivo "leed_projects.xls" que quedará guardado como "D:\PCCS\00_RawData\01_CSV\LEED\leed_projects.xls"
2. Estandarizacion del dataset
End of explanation
"""
# Load the re-saved (.xlsx) USGBC export and index rows by building name
path = r'D:\PCCS\00_RawData\01_CSV\LEED\leed_projects.xlsx'
raw_data = pd.read_excel(path)
raw_data.index.name = 'Building'
raw_data.head()
# Drop rows whose Country marks them as outside Mexico. The three labels
# below are the non-Mexico values present in this export; the original
# code repeated the same filter+print three times, so fold it into a loop
# (the printed output is identical).
print('La tabla tiene {} registros'.format(len(raw_data)))
for x in ('United States [us]', 'Colombia', 'United States'):
    raw_data = raw_data[raw_data['Country'] != x]
    print('Quitando los registros donde el país es "{}", la tabla queda con {} registros'.format(x, len(raw_data)))
raw_data.head()
"""
Explanation: El archivo tal como se descarga, a pesar de ser tabulados de excel, envia un mensaje de error cuando se intenta abrir directamente como se descargó. Por lo tanto, antes de procesarlo es necesario abrirlo en excel y guardarlo con formato .xlsx
End of explanation
"""
# Download the HTML of a single project page to probe its structure
page = requests.get('https://www.usgbc.org/projects/reforma-180')
tree = html.fromstring(page.content)
# Pull the address fields from spans tagged with schema.org itemprops
street = tree.xpath('//span[@itemprop="streetAddress"]/text()')
locality = tree.xpath('//span[@itemprop="addressLocality"]/text()')
postalcode = tree.xpath('//span[@itemprop="postalCode"]/text()')
country = tree.xpath('//span[@itemprop="addressCountry"]/text()')
# The street comes back as text fragments: join them and drop newlines
''.join(street).replace('\n', '')
# Inspect what came back -- each xpath call returns a list of text nodes
print('len({}), type({}) - {}'.format(len(street), type(street), street))
print('len({}), type({}) - {}'.format(len(locality), type(locality), locality))
print('len({}), type({}) - {}'.format(len(postalcode), type(postalcode), postalcode))
print('len({}), type({}) - {}'.format(len(country), type(country), country))
"""
Explanation: La base de datos es un listado de edificios que incluye para cada edificio:
- El nombre del edificio
- Una URL de referencia
- La fecha de la certificacion del edificio
- La ciudad, estado y país en el que se ubica el edificio
- El sistema de calificación bajo el ecual se certificó el edificio
- La versión de la certificación
- El nivel alcanzado por el edificio con la certificación.
Debido a que las columnas de Ciudad, estado y país no están realizadas bajo ningun estándar, es necesario asignar a cada renglón las claves geoestadísticas municipales de 5 dígitos correspondientes al municipio en el que se ubica el edificio.
Esto se hará manualmente pues cada renglón tiene que ser interpretado individualmente.
Durante la revision me di cuenta que si bien la tabla no tiene una clave para identificar cada ciudad y municipio, la liga de cada edificio nos lleva a una ficha del municipio que usualmente sí contiene un código postal; y desde el código postal es posible obtener el municipio y el estado.
A continuacion se hace la revision de una pagina para conocer su estructura y hacer un webscrapping desde esta estructura, esperando que sea igual en todas las fichas:
End of explanation
"""
# Script to scrape address fields from each project page, given its URL
def webcrawler(x):
    """Fetch the USGBC project page at URL *x* and scrape address fields.

    Returns a 4-element list [street, locality, postalcode, country]:
    - all four are False when the page could not be fetched or parsed;
    - street is a single joined string; the other three are the raw
      xpath result lists, or None when extraction failed.
    """
    time.sleep(0.05)  # small delay so we don't hammer the USGBC server
    try:
        page = requests.get(x)
        tree = html.fromstring(page.content)
    except Exception:
        # Network/parse failure: False sentinels distinguish "page
        # unreachable" from "field missing" (None) for downstream code.
        # (Narrowed from the original bare `except:`; unused `url` local
        # removed.)
        return [False, False, False, False]

    def _field(expr):
        # Extract one itemprop field; None signals extraction failure.
        try:
            return tree.xpath(expr)
        except Exception:
            return None

    # street is special-cased: its fragments are joined into one string.
    try:
        street = ''.join(tree.xpath('//span[@itemprop="streetAddress"]/text()'))
    except Exception:
        street = None
    locality = _field('//span[@itemprop="addressLocality"]/text()')
    postalcode = _field('//span[@itemprop="postalCode"]/text()')
    country = _field('//span[@itemprop="addressCountry"]/text()')
    return [street, locality, postalcode, country]
# Run the crawler only when no cached result exists -- the crawl is slow
# (one HTTP request per row), so reuse the saved copy whenever possible
archivoraw = r'D:\PCCS\00_RawData\01_CSV\LEED\crawl_leed.xlsx'
if os.path.isfile(archivoraw):
    print('NO SE REALIZÓ EL WEBCRAWL PORQUE YA SE TIENEN LOS DATOS EN \n {}'.format(archivoraw))
    print('*** Mejor importa el archivo para no gastar tantos recursos ***')
else:
    raw_data['crawl'] = raw_data.Path.apply(webcrawler)
"""
Explanation: Todos los datos son listas, pero "street" tiene 2 elementos. Entonces para el script lo que voy a hacer será eliminar todos los saltos de linea y concatenar el texto de todos los elementos de la lista
End of explanation
"""
raw_data.head()
# Persist raw_data so the slow webcrawl never has to be repeated
writer = pd.ExcelWriter(archivoraw)
raw_data.to_excel(writer, sheet_name = 'DATOS')
writer.save()
# Working "copy" of raw_data.
# NOTE(review): this assignment is an alias, not a copy -- the edits
# below also mutate raw_data; use raw_data.copy() if that matters.
datasetfinal = raw_data
# Flatten the crawler output ([street, locality, postalcode, country])
# into scalar address / postal-code columns
datasetfinal['address'] = datasetfinal.crawl.apply(lambda x: x[0].replace('\n', ''))
#sdf['city'] = sdf.crawl.apply(lambda x: x[1][0].replace('/n', ''))
# Assumes every row's crawl found a postal code; a False/None entry from
# a failed fetch would raise IndexError here -- TODO confirm on new crawls
datasetfinal['CP'] = datasetfinal.crawl.apply(lambda x: str(x[2][0]))
#sdf['city'] = sdf.crawl.apply(lambda x: x[3][0].replace('/n', ''))
datasetfinal.head(2)
"""
Explanation: Reemplaza los enters en cada lista
(Voy a saltarme este paso porque lo que me interesa en realidad es el Codigo Postal, pero dejo el codigo por si lo ocupo en el futuro)
def listtotext(x):
templist = []
for element in x:
if element == None or element == False:
templist.append(element)
else:
templist.append(''.join(x).replace('\n', ''))
return templist
End of explanation
"""
# Load the SEPOMEX postal-code -> municipality-key lookup table, keeping
# both codes as strings so leading zeros survive
bd_sepo = r'D:\PCCS\01_Dmine\Datasets\SEPOMEX\sepomex_CP_CVEMUN.xlsx'
SEPOMEX = pd.read_excel(bd_sepo, dtype={'CVE_MUN':'str', 'CP':'str'})
SEPOMEX.head(3)
"""
Explanation: A partir de los Codigos Postales ya es posible identificar la ciudad y municipio a la que pertenece cada edificio. Para esto, vamos a utilizar la base de datos de codigos postales del SEPOMEX que se descargó en otra minería de datos:
End of explanation
"""
datasetfinal.head()
# Attach CVE_MUN by left-joining on postal code; reset_index/set_index
# keeps the building names as the index through the merge
datasetfinal = datasetfinal.reset_index().merge(SEPOMEX, on='CP', how='left').set_index('Building')
datasetfinal.head()
"""
Explanation: Con la base de datos del SEPOMEX ya es posible unir ambos datasets para obtener las claves municipales de cada edificio
End of explanation
"""
# Count rows where the postal-code join found no municipality key
len(datasetfinal[datasetfinal['CVE_MUN'].isnull()])
"""
Explanation: Quedan 70 filas en donde no fue posible identificar la clave Municipal
End of explanation
"""
mira = ['City', 'State', 'CP', 'address', 'CVE_MUN'] # column subset used for inspection prints from here on
# Rows still missing a municipality key, plus how many distinct postal
# codes they involve (each one needs manual research)
sinmun = datasetfinal[datasetfinal['CVE_MUN'].isnull()][mira]
sinmun.head()
len(sinmun['CP'].unique())
"""
Explanation: Casos Particulares
Estos 70 registros tienen 33 claves unicas de C que requieren ser asignadas individualmente para conocer la CVE_MUN de cada edificio. Para esto, haremos un script que permita revisar cada clave para realizar la investigación necesaria y asignarle una CVE_MUN
End of explanation
"""
# Manually researched mapping: key = postal code ('CP'), value = CVE_MUN.
# None marks codes that cannot be resolved by CP alone (bogus CPs such as
# '00000'/'12345' and non-Mexico rows); those are handled individually below.
defmuns = {'00000': None,
           '00100': '09010',
           '00502': '15024',
           '00604': '15121',
           '00702': '15051',
           '01006': '09010',
           '01152': '09010',
           '01209': '09004',
           '01300': '09004',
           '03130': '09014',
           '03210': '09014',
           '05300': '09004',
           '05490': '15104',
           '05940': '15013',
           '08424': '14094',
           '11010': '09016',
           '11111': '14098',
           '11570': '09016',
           '12345': None,
           '21118': '02002',
           '22320': '02004',
           '23410': '03008',
           '23479': '03008',
           '31240': '08019',
           '46685': '14006',
           '48219': '16053',
           '56277': '15099',
           '66601': '19006',
           '67114': '19026',
           '76232': '22014',
           '77780': '23009',
           '78341': '24028',
           '87131': None}
"""
Explanation: En el siguiente diccionario recopila las CVE_MUN que se asignarán a los códigos postales que requieren asignacion individual. Los códigos cuyo valor es None se asignarán mas adelante
End of explanation
"""
# Corrections for miscaptured postal codes:
# key = CP as listed in the dataset, value = the correct CP
deberiaser = {'00100': '45620',
              '00502': '54830',
              '00604': '54713',
              '00702': '52004',
              '03130': '03103',
              '11111': '45620',
              '48219': '58218'}
"""
Explanation: El siguiente diccionario incluye códigos postales que requieren ser corregidos
End of explanation
"""
# Fill in the manually resolved municipality keys; CPs absent from
# defmuns map to NaN, so fillna keeps their previously merged CVE_MUN
datasetfinal['CVE_MUN'] = datasetfinal['CP'].map(defmuns).fillna(datasetfinal['CVE_MUN'])
"""
Explanation: Asignacion de codigos postales
End of explanation
"""
# Buildings with bogus CPs ('00000' / '12345') must be assigned one by one
sinmun.loc[sinmun['CP'].isin(['00000', '12345'])]
# Manual assignments: key = building name (index), value = CVE_MUN
buildings = {
    'Grainger Mexico HQ': '19039',
    'La Concha Pearl': '03003',
    #'Schneider Electric at COK': '66629', # duplicated building: left unassigned and dropped at the end
    'Bank of America-Reforma 115 5th floor': '09016',
    'Vesta Corporate Headquarters': '09016',
    'Air Traffic Control Tower': '15101', # assuming this is the NAICM control tower -- TODO confirm
    'Passenger Terminal Building': '15101', # the NAICM terminal building
    'Area Control Center': '15101', # NAICM infrastructure
    'Corporativo TRIO': '09004',
    'Casa GF': '19019',
    'Eurocenter 2': '09004',
    'ROUZ TOWER': '09014',
    'Periferico Sur Parque Industrial': '14098'
}
# There is one duplicated building; the duplicate is removed further below
datasetfinal.loc['Schneider Electric at COK'][mira]
# Write each manual CVE_MUN into the dataset, logging old -> new values
for k, v in buildings.items():
    building = datasetfinal.loc[k].name
    CVEMUN_prev = datasetfinal.loc[k]['CVE_MUN']
    datasetfinal.at[k, 'CVE_MUN'] = v
    print('Edificio:{} - la CVE_MUN {} se reemplazó por {}'.format(building, CVEMUN_prev, datasetfinal.at[k, 'CVE_MUN']))
"""
Explanation: Algunos edificios, marcados con los codigos postales 00000 y 12345 (Intuyo que por desidia del capturista) se tendrán que asignar individualmente
End of explanation
"""
# The rows with CP 87131 are buildings located outside Mexico
sinmun[sinmun['CP'] == '87131']
"""
Explanation: El dataset contiene dos edificios en el dataset que no corresponden a México:
End of explanation
"""
# The rows still lacking a CVE_MUN -- these are about to be dropped
datasetfinal[datasetfinal['CVE_MUN'].isnull()][mira]
"""
Explanation: Se eliminarán del dataset los siguientes edificios:
End of explanation
"""
# Drop the rows that never got a CVE_MUN (the duplicate and the
# non-Mexico buildings)
datasetfinal = datasetfinal.dropna(subset=['CVE_MUN'])
datasetfinal.head(3)
"""
Explanation: El primero por estar repetido y el resto por que no están en los Estados Unidos Mexicanos.
End of explanation
"""
# Show the rows whose postal code needs correcting, then apply the fixes;
# CPs absent from `deberiaser` map to NaN and are restored by fillna
datasetfinal[datasetfinal['CP'].isin(list(deberiaser.keys()))][mira]
# Replace the miscaptured postal codes with the corrected values
datasetfinal['CP'] = datasetfinal['CP'].map(deberiaser).fillna(datasetfinal['CP'])
datasetfinal[mira].head()
# Rename columns to the project's unique variable mnemonics
columns={
    'address':'direccion',
    'Path': 'URL',
    'Certification date': 'usgbc_fecha_cert',
    'Rating system':'usgbc_sis_val',
    'Version': 'usgbc_ver_sisv',
    'Certification level': 'usgbc_nv_cert',
}
datasetfinal = datasetfinal.rename(columns=columns)
datasetfinal.head(2)
# Column descriptions (exported later to the VARIABLES sheet)
variables = {
    'direccion': 'Ubicacion (Calle y numero)',
    'CVE_MUN': 'Clave geoestadística de 5 digitos a nivel municipal, de acuerdo con el Catálogo Único de Claves de Áreas Geoestadísticas Estatales, Municipales y Localidades de INEGI',
    'usgbc_fecha_cert': 'Fecha de certificacion como edificio LEED por el United States Green Building Council',
    'usgbc_sis_val': 'Sistema de valoracion aplicado por el United States Green Building Council al edificio',
    'usgbc_ver_sisv': 'Version del Sistema de valoracion aplicado por el United States Green Building Council al edificio',
    'usgbc_nv_cert': 'Nivel de certificacion como edificio LEED alcanzado por el edificio',
    'CP': 'Codigo Postal',
    'URL': 'Uniform Resource Locator, referencia a recurso en línea'
}
# Convert the descriptions into a DataFrame indexed by mnemonic
variables = pd.DataFrame.from_dict(variables, orient='index', dtype=None)
variables.columns = ['Descripcion']
variables = variables.rename_axis('Mnemonico')
variables.head()
# Keep only the final columns, in the publication order
setfinal = [
    'direccion',
    'CVE_MUN',
    'usgbc_fecha_cert',
    'usgbc_sis_val',
    'usgbc_ver_sisv',
    'usgbc_nv_cert',
    'CP',
    'URL']
datasetfinal = datasetfinal[setfinal]
datasetfinal.head()
# Dataset-level metadata (exported later to the METADATOS sheet)
metadatos = {
    'Nombre del Dataset': 'Edificios con Certificación LEED',
    'Descripcion del dataset': 'Edificios que han recibido algún nivel de certificación de Liderazgo en Energía y desarrollo Ambiental' \
                               ' (LEED, por sus siglas en ingles) Otorgado por el Consejo de edificios Verdes de Estados Unidos (USGBC' \
                               ' por sus suglas en inglés)',
    'Disponibilidad Temporal': '2007 - 2018',
    'Periodo de actualizacion': 'No Definido',
    'Nivel de Desagregacion': 'Edificio',
    'Notas': 's/n',
    'Fuente': 'United States Green Buildings Council',
    'URL_Fuente': 'https://www.usgbc.org/projects/list?page=17&keys=Mexico',
    'Dataset base': None
}
# Metadata to DataFrame for export
metadatos = pd.DataFrame.from_dict(metadatos, orient='index', dtype=None)
metadatos.columns = ['Descripcion']
metadatos = metadatos.rename_axis('Metadato')
metadatos
# Write the standardized dataset plus the metadata/variable sheets
# NOTE(review): `file` shadows a builtin name; harmless in this script
# but consider renaming.
file = r'D:\PCCS\01_Dmine\Datasets\LEED\PCCS_leed_projects.xlsx'
writer = pd.ExcelWriter(file)
datasetfinal.to_excel(writer, sheet_name = 'DATOS')
metadatos.to_excel(writer, sheet_name = 'METADATOS')
variables.to_excel(writer, sheet_name = 'VARIABLES')
writer.save()
print('---------------TERMINADO---------------')
"""
Explanation: Los edificios que requieren correccion de codigos postales son los siguientes:
End of explanation
"""
|
LucaCanali/Miscellaneous | Impala_SQL_Jupyter/Impala_Basic.ipynb | apache-2.0 | from impala.dbapi import connect
conn = connect(host='impalasrv-test', port=21050)
"""
Explanation: IPython/Jupyter notebooks for Apache Impala
1. Connect to the target database (requires Cloudera impyla package)
End of explanation
"""
# Run a bounded query and fetch all rows as a list of tuples
cur = conn.cursor()
cur.execute('select * from test2.emp limit 2')
cur.fetchall()
"""
Explanation: 2. Run a query and fetch the results
End of explanation
"""
# Fetch a full result set and convert it into a pandas DataFrame
cur = conn.cursor()
cur.execute('select * from test2.emp')
from impala.util import as_pandas
df = as_pandas(cur)
df.head()
"""
Explanation: Integration with pandas
End of explanation
"""
# Query two columns and plot them inline with matplotlib
cur = conn.cursor()
cur.execute('select ename, sal from test2.emp')
df = as_pandas(cur)
%matplotlib inline
import matplotlib
matplotlib.style.use('ggplot')
df.plot()
"""
Explanation: More examples of integration with IPython ecosystem
End of explanation
"""
|
jeffzhengye/pylearn | tensorflow_learning/tf2/notebooks/transfer_learning.ipynb | unlicense | import numpy as np
import tensorflow as tf
from tensorflow import keras
"""
Explanation: Transfer learning & fine-tuning
Author: fchollet<br>
Date created: 2020/04/15<br>
Last modified: 2020/05/12<br>
Description: Complete guide to transfer learning & fine-tuning in Keras.
Setup
End of explanation
"""
# A Dense layer has 2 weights (kernel + bias), both trainable
layer = keras.layers.Dense(3)
layer.build((None, 4))  # Create the weights
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
Explanation: Introduction
Transfer learning consists of taking features learned on one problem, and
leveraging them on a new, similar problem. For instance, features from a model that has
learned to identify racoons may be useful to kick-start a model meant to identify
tanukis.
Transfer learning is usually done for tasks where your dataset has too little data to
train a full-scale model from scratch.
The most common incarnation of transfer learning in the context of deep learning is the
following workflow:
Take layers from a previously trained model.
Freeze them, so as to avoid destroying any of the information they contain during
future training rounds.
Add some new, trainable layers on top of the frozen layers. They will learn to turn
the old features into predictions on a new dataset.
Train the new layers on your dataset.
A last, optional step, is fine-tuning, which consists of unfreezing the entire
model you obtained above (or part of it), and re-training it on the new data with a
very low learning rate. This can potentially achieve meaningful improvements, by
incrementally adapting the pretrained features to the new data.
First, we will go over the Keras trainable API in detail, which underlies most
transfer learning & fine-tuning workflows.
Then, we'll demonstrate the typical workflow by taking a model pretrained on the
ImageNet dataset, and retraining it on the Kaggle "cats vs dogs" classification
dataset.
This is adapted from
Deep Learning with Python
and the 2016 blog post
"building powerful image classification models using very little
data".
Freezing layers: understanding the trainable attribute
Layers & models have three weight attributes:
weights is the list of all weights variables of the layer.
trainable_weights is the list of those that are meant to be updated (via gradient
descent) to minimize the loss during training.
non_trainable_weights is the list of those that aren't meant to be trained.
Typically they are updated by the model during the forward pass.
Example: the Dense layer has 2 trainable weights (kernel & bias)
End of explanation
"""
# BatchNormalization has 2 trainable weights plus 2 non-trainable weights
# that track the running mean and variance of its inputs
layer = keras.layers.BatchNormalization()
layer.build((None, 4))  # Create the weights
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
Explanation: In general, all weights are trainable weights. The only built-in layer that has
non-trainable weights is the BatchNormalization layer. It uses non-trainable weights
to keep track of the mean and variance of its inputs during training.
To learn how to use non-trainable weights in your own custom layers, see the
guide to writing new layers from scratch.
Example: the BatchNormalization layer has 2 trainable weights and 2 non-trainable
weights
End of explanation
"""
# Setting trainable=False moves all of the layer's weights from the
# trainable list to the non-trainable one
layer = keras.layers.Dense(3)
layer.build((None, 4))  # Create the weights
layer.trainable = False  # Freeze the layer
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
"""
Explanation: Layers & models also feature a boolean attribute trainable. Its value can be changed.
Setting layer.trainable to False moves all the layer's weights from trainable to
non-trainable. This is called "freezing" the layer: the state of a frozen layer won't
be updated during training (either when training with fit() or when training with
any custom loop that relies on trainable_weights to apply gradient updates).
Example: setting trainable to False
End of explanation
"""
# Demonstration: the frozen layer1's weights are identical before and
# after fit(), while the rest of the model trains normally
# Make a model with 2 layers
layer1 = keras.layers.Dense(3, activation="relu")
layer2 = keras.layers.Dense(3, activation="sigmoid")
model = keras.Sequential([keras.Input(shape=(3,)), layer1, layer2])
# Freeze the first layer
layer1.trainable = False
# Keep a copy of the weights of layer1 for later reference
initial_layer1_weights_values = layer1.get_weights()
# Train the model
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
# Check that the weights of layer1 have not changed during training
final_layer1_weights_values = layer1.get_weights()
np.testing.assert_allclose(
    initial_layer1_weights_values[0], final_layer1_weights_values[0]
)
np.testing.assert_allclose(
    initial_layer1_weights_values[1], final_layer1_weights_values[1]
)
"""
Explanation: When a trainable weight becomes non-trainable, its value is no longer updated during
training.
End of explanation
"""
# trainable=False on a container model propagates to all nested sublayers
inner_model = keras.Sequential(
    [
        keras.Input(shape=(3,)),
        keras.layers.Dense(3, activation="relu"),
        keras.layers.Dense(3, activation="relu"),
    ]
)
model = keras.Sequential(
    [keras.Input(shape=(3,)), inner_model, keras.layers.Dense(3, activation="sigmoid"),]
)
model.trainable = False  # Freeze the outer model
assert inner_model.trainable == False  # All layers in `model` are now frozen
assert inner_model.layers[0].trainable == False  # `trainable` is propagated recursively
"""
Explanation: Do not confuse the layer.trainable attribute with the argument training in
layer.__call__() (which controls whether the layer should run its forward pass in
inference mode or training mode). For more information, see the
Keras FAQ.
Recursive setting of the trainable attribute
If you set trainable = False on a model or on any layer that has sublayers,
all children layers become non-trainable as well.
Example:
End of explanation
"""
# Load cats-vs-dogs via TFDS, using 40%/10%/10% of the original training
# split for train/validation/test
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
train_ds, validation_ds, test_ds = tfds.load(
    "cats_vs_dogs",
    # Reserve 10% for validation and 10% for test
    split=["train[:40%]", "train[40%:50%]", "train[50%:60%]"],
    as_supervised=True,  # Include labels
)
print("Number of training samples: %d" % tf.data.experimental.cardinality(train_ds))
print(
    "Number of validation samples: %d" % tf.data.experimental.cardinality(validation_ds)
)
print("Number of test samples: %d" % tf.data.experimental.cardinality(test_ds))
"""
Explanation: The typical transfer-learning workflow
This leads us to how a typical transfer learning workflow can be implemented in Keras:
Instantiate a base model and load pre-trained weights into it.
Freeze all layers in the base model by setting trainable = False.
Create a new model on top of the output of one (or several) layers from the base
model.
Train your new model on your new dataset.
Note that an alternative, more lightweight workflow could also be:
Instantiate a base model and load pre-trained weights into it.
Run your new dataset through it and record the output of one (or several) layers
from the base model. This is called feature extraction.
Use that output as input data for a new, smaller model.
A key advantage of that second workflow is that you only run the base model once on
your data, rather than once per epoch of training. So it's a lot faster & cheaper.
An issue with that second workflow, though, is that it doesn't allow you to dynamically
modify the input data of your new model during training, which is required when doing
data augmentation, for instance. Transfer learning is typically used for tasks when
your new dataset has too little data to train a full-scale model from scratch, and in
such scenarios data augmentation is very important. So in what follows, we will focus
on the first workflow.
Here's what the first workflow looks like in Keras:
First, instantiate a base model with pre-trained weights.
python
base_model = keras.applications.Xception(
weights='imagenet', # Load weights pre-trained on ImageNet.
input_shape=(150, 150, 3),
include_top=False) # Do not include the ImageNet classifier at the top.
Then, freeze the base model.
python
base_model.trainable = False
Create a new model on top.
```python
inputs = keras.Input(shape=(150, 150, 3))
We make sure that the base_model is running in inference mode here,
by passing training=False. This is important for fine-tuning, as you will
learn in a few paragraphs.
x = base_model(inputs, training=False)
Convert features of shape base_model.output_shape[1:] to vectors
x = keras.layers.GlobalAveragePooling2D()(x)
A Dense classifier with a single unit (binary classification)
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
```
Train the model on new data.
python
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()])
model.fit(new_dataset, epochs=20, callbacks=..., validation_data=...)
Fine-tuning
Once your model has converged on the new data, you can try to unfreeze all or part of
the base model and retrain the whole model end-to-end with a very low learning rate.
This is an optional last step that can potentially give you incremental improvements.
It could also potentially lead to quick overfitting -- keep that in mind.
It is critical to only do this step after the model with frozen layers has been
trained to convergence. If you mix randomly-initialized trainable layers with
trainable layers that hold pre-trained features, the randomly-initialized layers will
cause very large gradient updates during training, which will destroy your pre-trained
features.
It's also critical to use a very low learning rate at this stage, because
you are training a much larger model than in the first round of training, on a dataset
that is typically very small.
As a result, you are at risk of overfitting very quickly if you apply large weight
updates. Here, you only want to readapt the pretrained weights in an incremental way.
This is how to implement fine-tuning of the whole base model:
```python
Unfreeze the base model
base_model.trainable = True
It's important to recompile your model after you make any changes
to the trainable attribute of any inner layer, so that your changes
are taken into account
model.compile(optimizer=keras.optimizers.Adam(1e-5), # Very low learning rate
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()])
Train end-to-end. Be careful to stop before you overfit!
model.fit(new_dataset, epochs=10, callbacks=..., validation_data=...)
```
Important note about compile() and trainable
Calling compile() on a model is meant to "freeze" the behavior of that model. This
implies that the trainable
attribute values at the time the model is compiled should be preserved throughout the
lifetime of that model,
until compile is called again. Hence, if you change any trainable value, make sure
to call compile() again on your
model for your changes to be taken into account.
Important notes about BatchNormalization layer
Many image models contain BatchNormalization layers. That layer is a special case on
every imaginable count. Here are a few things to keep in mind.
BatchNormalization contains 2 non-trainable weights that get updated during
training. These are the variables tracking the mean and variance of the inputs.
When you set bn_layer.trainable = False, the BatchNormalization layer will
run in inference mode, and will not update its mean & variance statistics. This is not
the case for other layers in general, as
weight trainability & inference/training modes are two orthogonal concepts.
But the two are tied in the case of the BatchNormalization layer.
When you unfreeze a model that contains BatchNormalization layers in order to do
fine-tuning, you should keep the BatchNormalization layers in inference mode by
passing training=False when calling the base model.
Otherwise the updates applied to the non-trainable weights will suddenly destroy
what the model has learned.
You'll see this pattern in action in the end-to-end example at the end of this guide.
Transfer learning & fine-tuning with a custom training loop
If instead of fit(), you are using your own low-level training loop, the workflow
stays essentially the same. You should be careful to only take into account the list
model.trainable_weights when applying gradient updates:
```python
Create base model
base_model = keras.applications.Xception(
weights='imagenet',
input_shape=(150, 150, 3),
include_top=False)
Freeze base model
base_model.trainable = False
Create new model on top.
inputs = keras.Input(shape=(150, 150, 3))
x = base_model(inputs, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam()
Iterate over the batches of a dataset.
for inputs, targets in new_dataset:
# Open a GradientTape.
with tf.GradientTape() as tape:
# Forward pass.
predictions = model(inputs)
# Compute the loss value for this batch.
loss_value = loss_fn(targets, predictions)
# Get gradients of loss wrt the *trainable* weights.
gradients = tape.gradient(loss_value, model.trainable_weights)
# Update the weights of the model.
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
```
Likewise for fine-tuning.
An end-to-end example: fine-tuning an image classification model on a cats vs. dogs
dataset
To solidify these concepts, let's walk you through a concrete end-to-end transfer
learning & fine-tuning example. We will load the Xception model, pre-trained on
ImageNet, and use it on the Kaggle "cats vs. dogs" classification dataset.
Getting the data
First, let's fetch the cats vs. dogs dataset using TFDS. If you have your own dataset,
you'll probably want to use the utility
tf.keras.preprocessing.image_dataset_from_directory to generate similar labeled
dataset objects from a set of images on disk filed into class-specific folders.
Transfer learning is most useful when working with very small datasets. To keep our
dataset small, we will use 40% of the original training data (25,000 images) for
training, 10% for validation, and 10% for testing.
End of explanation
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for i, (image, label) in enumerate(train_ds.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image)
plt.title(int(label))
plt.axis("off")
"""
Explanation: These are the first 9 images in the training dataset -- as you can see, they're all
different sizes.
End of explanation
"""
size = (150, 150)
train_ds = train_ds.map(lambda x, y: (tf.image.resize(x, size), y))
validation_ds = validation_ds.map(lambda x, y: (tf.image.resize(x, size), y))
test_ds = test_ds.map(lambda x, y: (tf.image.resize(x, size), y))
"""
Explanation: We can also see that label 1 is "dog" and label 0 is "cat".
Standardizing the data
Our raw images have a variety of sizes. In addition, each pixel consists of 3 integer
values between 0 and 255 (RGB level values). This isn't a great fit for feeding a
neural network. We need to do 2 things:
Standardize to a fixed image size. We pick 150x150.
Normalize pixel values between -1 and 1. We'll do this using a Normalization layer as
part of the model itself.
In general, it's a good practice to develop models that take raw data as input, as
opposed to models that take already-preprocessed data. The reason being that, if your
model expects preprocessed data, any time you export your model to use it elsewhere
(in a web browser, in a mobile app), you'll need to reimplement the exact same
preprocessing pipeline. This gets very tricky very quickly. So we should do the least
possible amount of preprocessing before hitting the model.
Here, we'll do image resizing in the data pipeline (because a deep neural network can
only process contiguous batches of data), and we'll do the input value scaling as part
of the model, when we create it.
Let's resize images to 150x150:
End of explanation
"""
batch_size = 32
train_ds = train_ds.cache().batch(batch_size).prefetch(buffer_size=10)
validation_ds = validation_ds.cache().batch(batch_size).prefetch(buffer_size=10)
test_ds = test_ds.cache().batch(batch_size).prefetch(buffer_size=10)
"""
Explanation: Besides, let's batch the data and use caching & prefetching to optimize loading speed.
End of explanation
"""
from tensorflow import keras
from tensorflow.keras import layers
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal"),
layers.experimental.preprocessing.RandomRotation(0.1),
]
)
"""
Explanation: Using random data augmentation
When you don't have a large image dataset, it's a good practice to artificially
introduce sample diversity by applying random yet realistic transformations to
the training images, such as random horizontal flipping or small random rotations. This
helps expose the model to different aspects of the training data while slowing down
overfitting.
End of explanation
"""
import numpy as np
for images, labels in train_ds.take(1):
plt.figure(figsize=(10, 10))
first_image = images[0]
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
augmented_image = data_augmentation(
tf.expand_dims(first_image, 0), training=True
)
plt.imshow(augmented_image[0].numpy().astype("int32"))
plt.title(int(labels[i]))
plt.axis("off")
"""
Explanation: Let's visualize what the first image of the first batch looks like after various random
transformations:
End of explanation
"""
base_model = keras.applications.Xception(
weights="imagenet", # Load weights pre-trained on ImageNet.
input_shape=(150, 150, 3),
include_top=False,
) # Do not include the ImageNet classifier at the top.
# Freeze the base_model
base_model.trainable = False
# Create new model on top
inputs = keras.Input(shape=(150, 150, 3))
x = data_augmentation(inputs) # Apply random data augmentation
# Pre-trained Xception weights requires that input be normalized
# from (0, 255) to a range (-1., +1.), the normalization layer
# does the following, outputs = (inputs - mean) / sqrt(var)
norm_layer = keras.layers.experimental.preprocessing.Normalization()
mean = np.array([127.5] * 3)
var = mean ** 2
# Scale inputs to [-1, +1]
x = norm_layer(x)
norm_layer.set_weights([mean, var])
# The base model contains batchnorm layers. We want to keep them in inference mode
# when we unfreeze the base model for fine-tuning, so we make sure that the
# base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x) # Regularize with dropout
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.summary()
"""
Explanation: Build a model
Now let's build a model that follows the blueprint we've explained earlier.
Note that:
We add a Normalization layer to scale input values (initially in the [0, 255]
range) to the [-1, 1] range.
We add a Dropout layer before the classification layer, for regularization.
We make sure to pass training=False when calling the base model, so that
it runs in inference mode, so that batchnorm statistics don't get updated
even after we unfreeze the base model for fine-tuning.
End of explanation
"""
model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()],
)
epochs = 20
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
"""
Explanation: Train the top layer
End of explanation
"""
# Unfreeze the base_model. Note that it keeps running in inference mode
# since we passed `training=False` when calling it. This means that
# the batchnorm layers will not update their batch statistics.
# This prevents the batchnorm layers from undoing all the training
# we've done so far.
base_model.trainable = True
model.summary()
model.compile(
optimizer=keras.optimizers.Adam(1e-5), # Low learning rate
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()],
)
epochs = 10
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
"""
Explanation: Do a round of fine-tuning of the entire model
Finally, let's unfreeze the base model and train the entire model end-to-end with a low
learning rate.
Importantly, although the base model becomes trainable, it is still running in
inference mode since we passed training=False when calling it when we built the
model. This means that the batch normalization layers inside won't update their batch
statistics. If they did, they would wreak havoc on the representations learned by the
model so far.
End of explanation
"""
|
NlGG/MachineLearning | NeuralNetwork/auto_encorder_and_rnn.ipynb | mit | %matplotlib inline
import numpy as np
import pylab as pl
import math
from sympy import *
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from nn import NN
"""
Explanation: 今回のレポートでは、①オートエンコーダの作成、②再帰型ニューラルネットワークの作成を試みた。
①コブダクラス型生産関数を再現できるオートエンコーダの作成が目標である。
End of explanation
"""
def example1(x_1, x_2):
    """Production function used in the 3D-surface demo.

    Returns 0.5 * x_1**0.5 * x_2. Works elementwise on NumPy arrays
    (as used with the meshgrid below) as well as on scalars.

    NOTE(review): the second factor is ``x_2 * 0.5``, not ``x_2 ** 0.5``;
    a symmetric Cobb-Douglas would use the latter, but the accompanying
    markdown repeats the same formula — confirm which is intended.
    """
    return (x_1 ** 0.5) * (x_2 * 0.5)
fig = pl.figure()
ax = Axes3D(fig)
X = np.arange(0, 1, 0.1)
Y = np.arange(0, 1, 0.1)
X, Y = np.meshgrid(X, Y)
Z = example1(X, Y)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1)
pl.show()
"""
Explanation: 定義域は0≤x≤1である。
<P>コブ・ダグラス型生産関数は以下の通りである。</P>
<P>z = x_1**0.5*x_2*0.5</P>
End of explanation
"""
nn = NN()
"""
Explanation: NNのクラスはすでにNN.pyからimportしてある。
End of explanation
"""
x_1 = Symbol('x_1')
x_2 = Symbol('x_2')
f = x_1**0.5*x_2*0.5
"""
Explanation: 以下に使い方を説明する。
初めに、このコブ・ダグラス型生産関数を用いる。
End of explanation
"""
nn.set_input_layer(2)
nn.set_hidden_layer(2)
nn.set_output_layer(2)
"""
Explanation: 入力層、中間層、出力層を作る関数を実行する。引数には層の数を用いる。
End of explanation
"""
nn.setup()
nn.initialize()
"""
Explanation: <p>nn.set_hidden_layer()は同時にシグモイド関数で変換する前の中間層も作る。</p>
<p>set_output_layer()は同時にシグモイド関数で変換する前の出力層、さらに教師データを入れる配列も作る。</p>
nn.setup()で入力層ー中間層、中間層ー出力層間の重みを入れる配列を作成する。
nn.initialize()で重みを初期化する。重みは-1/√d ≤ w ≤ 1/√d (dは入力層及び中間層の数)の範囲で一様分布から決定される。
End of explanation
"""
idata = [1, 2]
nn.supervised_function(f, idata)
"""
Explanation: nn.supervised_function(f, idata)は教師データを作成する。引数は関数とサンプルデータをとる。
End of explanation
"""
nn.simulate(1, 0.1)
"""
Explanation: nn.simulate(N, eta)は引数に更新回数と学習率をとる。普通はN=1で行うべきかもしれないが、工夫として作成してみた。N回学習した後に出力層を返す。
End of explanation
"""
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
print X, Y
"""
Explanation: nn.calculation()は学習せずに入力層から出力層の計算を行う。nn.simulate()内にも用いられている。
次に実際に学習を行う。サンプルデータは、
End of explanation
"""
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
nn = NN()
nn.set_network()
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
for i in range(100):
l = np.random.choice([i for i in range(len(a))])
m = nn.main2(1, f, [a[l], b[l]], 0.5)
for x in X:
for y in Y:
idata = [x, y]
c = np.append(c, nn.realize(f, idata))
a
b
c
"""
Explanation: の組み合わせである。
End of explanation
"""
fig = pl.figure()
ax = Axes3D(fig)
ax.scatter(a, b, c)
pl.show()
"""
Explanation: 例えば(0, 0)を入力すると0.52328635を返している(つまりa[0]とb[0]を入力して、c[0]の値を返している)。
ここでは交差検定は用いていない。
End of explanation
"""
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
nn = NN()
nn.set_network()
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
for i in range(10000):
l = np.random.choice([i for i in range(len(a))])
m = nn.main2(1, f, [a[l], b[l]], 0.5)
for x in X:
for y in Y:
idata = [x, y]
c = np.append(c, nn.realize(f, idata))
fig = pl.figure()
ax = Axes3D(fig)
ax.scatter(a, b, c)
pl.show()
"""
Explanation: 確率的勾配降下法を100回繰り返したが見た感じから近づいている。回数を10000回に増やしてみる。
End of explanation
"""
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
evl = np.array([])
for i in range(len(a)):
nn = NN()
nn.set_network()
for j in range(1):
l = np.random.choice([i for i in range(len(a))])
if l != i:
nn.main2(1, f, [a[l], b[l]], 0.5)
idata = [a[i], b[i]]
est = nn.realize(f, idata)
evl = np.append(evl, math.fabs(est - nn.supervised_data))
np.average(evl)
"""
Explanation: 見た感じ随分近づいているように見える。
最後に交差検定を行う。
初めに学習回数が極めて少ないNNである。
End of explanation
"""
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
nn = NN()
nn.set_network(h=7)
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
evl = np.array([])
for i in range(len(a)):
nn = NN()
nn.set_network()
for j in range(100):
l = np.random.choice([i for i in range(len(a))])
if l != i:
nn.main2(1, f, [a[l], b[l]], 0.5)
idata = [a[i], b[i]]
evl = np.append(evl, math.fabs(nn.realize(f, idata) - nn.supervised_data))
np.average(evl)
"""
Explanation: 次に十分大きく(100回に)してみる。
End of explanation
"""
nn = NN()
nn.set_network()
X = np.arange(0, 1, 0.05)
Y = np.arange(0, 1, 0.05)
a = np.array([])
b = np.array([])
c = np.array([])
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
evl = np.array([])
s = [i for i in range(len(a))]
for j in range(1000):
l = np.random.choice(s)
nn.main2(1, f, [a[l], b[l]], 0.5)
c = np.array([])
for i in range(len(a)):
idata = [a[i], b[i]]
c = np.append(c, nn.realize(f, idata))
fig = pl.figure()
ax = Axes3D(fig)
ax.scatter(a, b, c)
pl.show()
"""
Explanation: 誤差の平均であるので小さい方がよい。
学習回数を増やした結果、精度が上がった。
最後にオートエンコーダを作成する。回数を増やした方がよいことが分かったため、10000回学習させてみる。
End of explanation
"""
from nn import RNN
"""
Explanation: 十分再現できていることが分かる。
②ゲーム理論で用いられるTit for Tatを再現してみる。二人のプレーヤーが互いにRNNで相手の行動を予測し、相手の行動に対してTit for Tatに基づいた行動を選択する。
End of explanation
"""
# Two RNN players repeatedly predict each other's moves and respond
# tit-for-tat style (1 = cooperate, 0 = defect).
nn1 = RNN()
nn1.set_network()
nn2 = RNN()
nn2.set_network()
# Round 1 is given, not produced by the RNNs: player 1 cooperates,
# player 2 defects.
idata1 = [[1, 0]]
idata2 = [[0, 1]]
sdata1 = [[0]]
sdata2 = [[1]]
for t in range(10):
    # Train each network a few steps on the opponent's observed history.
    for i in range(10):
        nn1.main2(idata1, sdata2, 0.9)
        nn2.main2(idata2, sdata1, 0.9)
    idata1.append([sdata1[-1][0], sdata2[-1][0]])
    # Player 2's view is player 1's latest observation with roles swapped.
    idata2.append([idata1[-1][1], idata1[-1][0]])
    n1r = nn1.realize(idata1)
    # NOTE(review): nn2 is evaluated on idata1 here, not idata2 —
    # presumably a copy-paste slip; verify which history player 2 should see.
    n2r = nn2.realize(idata1)
    # Each player cooperates next round with probability equal to its RNN output.
    sdata1.append([np.random.choice([1, 0], p=[n1r, 1-n1r])])
    sdata2.append([np.random.choice([1, 0], p=[n2r, 1-n2r])])
    # NOTE(review): `idata` is not defined in this cell (it comes from an
    # earlier, unrelated cell) — likely a leftover or meant idata1/idata2; verify.
    idata.append([sdata1[-1][0], sdata2[-1][0]])
print nn1.realize(idata1), nn2.realize(idata2), idata1
"""
Explanation: 最初の行動はRNNで指定できないので、所与となる。この初期値と裏切りに対する感応度で収束の仕方が決まる。
協調を1、裏切りを0としている。RNNの予測値は整数値でないが、p=(RNNの出力値)で次回に協調を行う。
例1:1期目に、プレーヤー1が協力、プレーヤー2が裏切り。
End of explanation
"""
p1 = []
p2 = []
for i in range(len(idata1)):
p1.append(idata1[i][0])
for i in range(len(idata2)):
p2.append(idata2[i][0])
p1 = plt.plot(p1, label="player1")
p2 = plt.plot(p2, label="player2")
plt.legend()
"""
Explanation: 下の図より、最初は交互に相手にしっぺ返しをしているが、やがて両者が裏切り合うこと状態に収束する。
End of explanation
"""
nn1 = RNN()
nn1.set_network()
nn2 = RNN()
nn2.set_network()
idata1 = [[1, 1]]
idata2 = [[1, 1]]
sdata1 = [[1]]
sdata2 = [[1]]
for t in range(10):
for i in range(10):
nn1.main2(idata1, sdata2, 0.9)
nn2.main2(idata2, sdata1, 0.9)
idata1.append([sdata1[-1][0], sdata2[-1][0]])
idata2.append([idata1[-1][1], idata1[-1][0]])
n1r = nn1.realize(idata1)
n2r = nn2.realize(idata1)
prob1 = n1r
prob2 = n2r - 0.3
if prob2 < 0:
prob2 = 0
sdata1.append([np.random.choice([1, 0], p=[prob1, 1-prob1])])
sdata2.append([np.random.choice([1, 0], p=[prob2, 1-prob2])])
idata.append([sdata1[-1][0], sdata2[-1][0]])
print nn1.realize(idata1), nn2.realize(idata2), idata1
p1 = []
p2 = []
for i in range(len(idata1)):
p1.append(idata1[i][0])
for i in range(len(idata2)):
p2.append(idata2[i][0])
p1 = plt.plot(p1, label="player1")
p2 = plt.plot(p2, label="player2")
plt.legend()
"""
Explanation: 例2:1期目に、プレーヤー1が協力、プレーヤー2が協力。ただし、プレーヤー2は相手の裏切りをかなり警戒している。
警戒を表すためにp=(RNNの出力値 - 0.2)とする。p<0の場合はp=0に直す。
End of explanation
"""
nn1 = RNN()
nn1.set_network()
nn2 = RNN()
nn2.set_network()
idata1 = [[1, 0]]
idata2 = [[0, 1]]
sdata1 = [[0]]
sdata2 = [[1]]
for t in range(10):
for i in range(10):
nn1.main2(idata1, sdata2, 0.9)
nn2.main2(idata2, sdata1, 0.9)
idata1.append([sdata1[-1][0], np.random.choice([sdata2[-1][0], 1-sdata2[-1][0]], p=[0.8, 0.2])])
idata2.append([sdata2[-1][0], np.random.choice([sdata1[-1][0], 1-sdata1[-1][0]], p=[0.8, 0.2])])
n1r = nn1.realize(idata1)
n2r = nn2.realize(idata1)
prob1 = n1r
prob2 = n2r
sdata1.append([np.random.choice([1, 0], p=[prob1, 1-prob1])])
sdata2.append([np.random.choice([1, 0], p=[prob2, 1-prob2])])
idata.append([sdata1[-1][0], sdata2[-1][0]])
print nn1.realize(idata1), nn2.realize(idata2), idata1
p1 = []
p2 = []
for i in range(len(idata1)):
p1.append(idata1[i][0])
for i in range(len(idata2)):
p2.append(idata2[i][0])
p1 = plt.plot(p1, label="player1")
p2 = plt.plot(p2, label="player2")
plt.legend()
"""
Explanation: 例3:次に相手の行動を完全には観測できない場合を考える。t期の相手の行動をt+1期にノイズが加わって知る。例えば、1期目に相手が協調したことを、確率90%で2期目に正しく知れるが、10%で裏切りと誤って伝わる場合である。
ノイズは20%の確率で加わるものとする。その他の条件は例1と同じにした。
End of explanation
"""
|
GuidoBR/python-for-finance | python-for-finance-investment-fundamentals-data-analytics/1 - Calculating and Comparing Rates of Return in Python/Rate of Return.ipynb | mit | BRK['simple_return'] = (BRK['Close'] / BRK['Close'].shift(1)) - 1
print(BRK['simple_return'])
BRK['simple_return'].plot(figsize=(8,5))
plt.show()
avg_returns_d = BRK['simple_return'].mean()
avg_returns_d
avg_returns_a = avg_returns_d * 250 # multiply by the average number of business days per year
print(str(round(avg_returns_a, 5) * 100) + ' %')
"""
Explanation: Simple Rate of Return
$$
\frac{P_1 - P_0}{P_0} = \frac{P_1}{P_0} - 1
$$
Adj Close is "Adjusted close price", adjusted by dividends and other events at the close of the market at that date.
End of explanation
"""
BRK.head()
BRK['log_return'] = np.log(BRK['Close'] / BRK['Close'].shift(1))
print(BRK['log_return'])
BRK['log_return'].plot(figsize=(8, 5))
plt.show()
log_return_d = BRK['log_return'].mean()
log_return_d
log_return_a = BRK['log_return'].mean() * 250
log_return_a
print(str(round(log_return_a, 5) * 100) + ' %')
"""
Explanation: Logarithmic Return
$$
ln(\frac{P_t}{P_{t-1}})
$$
End of explanation
"""
|
jotterbach/Data-Exploration-and-Numerical-Experimentation | Numerical-Experimentation/Series of N equals in coin tosses.ipynb | cc0-1.0 | import random as rd
import numpy as np
from numpy.random import choice
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.style.use('ggplot')
matplotlib.rc_params_from_file("../styles/matplotlibrc" ).update()
"""
Explanation: Series of N equals in coin tosses
For a fair coin, the question if it lands heads or tails is easily answered. Almost as simple is the question of what is the probability of having N heads (tails) in a row. However, it is not immediately obvious how we would answer a question like "How many tosses do we need to make before we will observe a series of N heads (tails)?". The answer is easily computed numerically and we will show an analytical solution in the end.
Numerical experiment
Python imports
End of explanation
"""
list_of_categories = ["H", "T"]
def initializeExperiment(N, prob=None):
    """Seed the experiment with N independent coin tosses.

    A run of N equal tosses needs at least N throws, so the experiment
    always starts from N draws.

    Parameters
    ----------
    N : int
        Number of initial tosses.
    prob : sequence of float, optional
        Probabilities for ("H", "T"); defaults to a fair coin.
        A ``None`` sentinel replaces the original mutable default
        argument (``prob=[0.5, 0.5]``), which is a Python pitfall.

    Returns
    -------
    list
        The N simulated tosses, each "H" or "T".
    """
    if prob is None:
        prob = [0.5, 0.5]
    return [choice(list_of_categories, p=prob) for _ in range(N)]
"""
Explanation: Constructing the experiment
It is obvious that to observe a series of N equals we need to have done N throws at least. Hence we initialize the experiment by tossing the coin N times. We define the categories as "H" and "T"
End of explanation
"""
def areLastNTossesEqualy(tosses, N, category):
    """Return True iff the last N entries of `tosses` all equal `category`.

    Note: if fewer than N tosses exist, ``tosses[-N:]`` is the whole list,
    so a short all-`category` history also returns True; callers always
    seed at least N tosses, which makes this safe in practice.
    """
    # set(slice) == {category} holds exactly when the slice is non-empty
    # and uniform in `category` — replaces the verbose
    # `if cond: return True / else: return False` pattern.
    return set(tosses[-N:]) == {category}
"""
Explanation: Next we need to check if the last N throws have been equal to the category we want to observe. To do this we construct a set of the last N tosses. If the size of the set is 1 and the category in the set is the one we are looking for we found a sequence of N equal tosses.
End of explanation
"""
def runSingleExperiment(max_num_throws, number_of_equals, prob=None):
    """Toss a coin until `number_of_equals` heads appear in a row.

    Parameters
    ----------
    max_num_throws : int
        Cap on additional tosses after the initial seeding.
    number_of_equals : int
        Length of the run of heads ("H") we are waiting for.
    prob : sequence of float, optional
        Probabilities for ("H", "T"); defaults to a fair coin (``None``
        sentinel instead of the original mutable default list).

    Returns
    -------
    int or None
        Total number of tosses needed, or None if no run appeared within
        the cap (note: ``np.mean`` in the caller will fail on None, so
        choose `max_num_throws` generously).
    """
    if prob is None:
        prob = [0.5, 0.5]
    tosses = initializeExperiment(number_of_equals, prob)
    throws = 0
    while throws < max_num_throws:
        if areLastNTossesEqualy(tosses, number_of_equals, "H"):
            return len(tosses)
        # Bug fix: the original drew follow-up tosses without `p=prob`,
        # silently reverting to a fair coin even when a biased `prob`
        # was supplied.
        tosses.append(choice(list_of_categories, p=prob))
        throws += 1
"""
Explanation: Running the experiment
Since we have no prior knowledge of when the experiment will terminate we limit ourselves to a maximum number of tosses. We always check if the last N tosses have been heads (H). If yes, we terminate otherwise we continue with another toss.
End of explanation
"""
def runKExperimentsAndEvaluate(m_experiments, number_of_equals, number_of_maximum_tosses=500, prob=None):
    """Repeat the single-run experiment and summarize the toss counts.

    Parameters
    ----------
    m_experiments : int
        Number of independent experiments to run.
    number_of_equals : int
        Length of the run of heads each experiment waits for.
    number_of_maximum_tosses : int
        Cap passed through to ``runSingleExperiment``.
    prob : sequence of float, optional
        Probabilities for ("H", "T"); defaults to a fair coin (``None``
        sentinel instead of the original mutable default list).

    Returns
    -------
    tuple
        (mean, std, raw list of toss counts).

    NOTE(review): if any run exceeds the cap, ``runSingleExperiment``
    yields None and np.mean/np.std will raise — confirm the cap is large
    enough for the requested run length.
    """
    if prob is None:
        prob = [0.5, 0.5]
    number_of_tosses = [
        runSingleExperiment(number_of_maximum_tosses, number_of_equals, prob)
        for _ in range(m_experiments)
    ]
    return np.mean(number_of_tosses), np.std(number_of_tosses), number_of_tosses
"""
Explanation: Finally we want to run M experiments and evaluate for the expected number of throws.
End of explanation
"""
print "We expect to observe 3 heads after %3.2f tosses" % runKExperimentsAndEvaluate(5000, 3)[0]
"""
Explanation: So for 3 heads in a row, what's the expected number of tosses to observe this event"
End of explanation
"""
tosses_three_equals = runKExperimentsAndEvaluate(25000, 3, number_of_maximum_tosses=1000)[2]
tosses_four_equals = runKExperimentsAndEvaluate(25000, 4, number_of_maximum_tosses=1000)[2]
tosses_five_equals = runKExperimentsAndEvaluate(25000, 5, number_of_maximum_tosses=1000)[2]
bin_range = range(0,150, 2)
plt.hist(tosses_three_equals, bins=bin_range, normed=True)
plt.hist(tosses_four_equals, bins=bin_range, normed=True)
plt.hist(tosses_five_equals, bins=bin_range, normed=True)
plt.xlim([0,100])
"""
Explanation: As we will see later the non-integer nature of this expectation value is a residual of the numerical procedure we employed and it could easily be cast to an integer.
Before we get into the mathematical formulation of the problem, let's study the distribution a little bit more. A good way to gain some insight into the distribution is to look at the normalized histogram of the number of total throws until success. To this end we take a look at the raw tosses returned by runKExperimentsAndEvaluate
End of explanation
"""
def expectationValueForNumberOfTosses(p, number_of_equals):
    """Analytic expectation E(X) = (1 - p^n) / (p^n (1 - p)), truncated to int.

    `p` is the per-toss success probability and `number_of_equals` the
    required run length n.
    """
    success_run_prob = np.power(p, number_of_equals)
    return int((1.0 - success_run_prob) / (success_run_prob * (1.0 - p)))
equals = np.linspace(1,20, 20)
y = []
for x in equals:
y.append(expectationValueForNumberOfTosses(0.5, x))
plt.semilogy( equals, y, 'o')
"""
Explanation: Maybe surprisingly the distribution is not very well localized. In fact trying to fit it with an exponential function given the calculated mean fails. Increasing the number of required equals makes the curve flatter and more heavy tailed. Thus the variance itself is also large. In fact it is of the same order as the mean!
Mathematical calculation of the expectation value
Let us now turn towards the mathematical calculation of the expectation value. Note that we define success as the event $X$ where we have $n$ heads in a row. It is then easy to see that whenever we observe tails we essentially wasted all tries before and need to start over again. If $p$ is the probability of having the coin land on heads then the expectation value can be expressed simply by
\begin{align}
E(X) = np^n + (1-p) \left[E(X) +1\right] + p(1-p) \left[E(X) +2\right] + p^2(1-p) \left[E(X) +3\right]... = np^n + (1-p)\sum\limits_{i=0}^{n-1}p^i\left[E(X)+i+1\right]
\end{align}
Evaluating the sum we find the simple expression:
\begin{align}
E(X) = \frac{1-p^n}{p^n(1-p)}
\end{align}
To gain some understanding why above formula works let us look closer at the case for two eqaual tosses. In this case we have success if $X=HH$. To calculate the probability of this event happening we condition on the first toss, i.e. we calculate
\begin{align}
E(X) = E(X|H) p(H) + E(X|T) p(T)
\end{align}
where the condition is the result of the first toss. Expanding this some more we get
\begin{align}
E(X) = p^2 + (1-p) \left[E(X) + 1\right]
\end{align}
where we employed the same argument as above.
End of explanation
"""
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom stylesheet and return it for rich display.

    Reads ../styles/custom.css relative to the notebook's working
    directory; raises IOError/FileNotFoundError if the file is absent.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle open for the garbage collector).
    with open("../styles/custom.css", "r") as f:
        return HTML(f.read())
css_styling()
"""
Explanation: From the plot above we see that the number of tosses until we have $N$ equals grows exponentially! (Observe the logarithmic scale). For $N=20$ heads in a row we need on the order of 2 million successive throws for a fair coin. If we could manually throw a coin every second, it would take us about 23 days of uninterrupted coin tossing until we could expect 20 heads in a row to appear... but remember: The distribution has a heavy tail and the variance is also on the order of 23 days!
Summary
We did some numerical experiments to investigate the question of how many times we need to throw a coin before we can expect $N$ heads in a row. The distribution of this problem is surprisingly heavy tailed and becomes heavier the more heads are required. We saw that the variance is as large as the expectation value, which grows exponentially with the number of required equals as we proved mathematically.
Enjoy this notebook! Contributions and/or feedback are welcome as always!
Stylesheet
End of explanation
"""
|
graphistry/pygraphistry | demos/more_examples/graphistry_features/encodings-colors.ipynb | bsd-3-clause | # ! pip install --user graphistry
import graphistry
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
graphistry.__version__
import datetime, pandas as pd
e_df = pd.DataFrame({
's': ['a', 'b', 'c', 'a', 'd', 'e'],
'd': ['b', 'c', 'a', 'c', 'e', 'd'],
'time': [datetime.datetime(1987, 10, 1), datetime.datetime(1987, 10, 2), datetime.datetime(1987, 10, 3),
datetime.datetime(1988, 10, 1), datetime.datetime(1988, 10, 2), datetime.datetime(1988, 10, 3)]
})
n_df = pd.DataFrame({
'n': ['a', 'b', 'c', 'd', 'e'],
'score': [ 0, 30, 50, 70, 90 ],
'palette_color_int32': pd.Series(
[0, 1, 2, 3, 4],
dtype='int32'),
'hex_color_int64': pd.Series(
[0xFF000000, 0xFFFF0000, 0xFFFFFF00, 0x00FF0000, 0x0000FF00],
dtype='int64'),
'type': ['mac', 'macbook', 'mac', 'macbook', 'sheep']
})
g = graphistry.edges(e_df, 's', 'd').nodes(n_df, 'n')
"""
Explanation: Color encodings tutorial
See the examples below for common ways to map data to node/edge color in Graphistry.
Colors are often used with node size, icon, label, and badges to provide more visual information. Most encodings work both for points and edges. The PyGraphistry Python client makes it easier to use the URL settings API and the REST upload API. For dynamic control, you can use also use the JavaScript APIs.
Setup
Mode api=3 is recommended. It is required for complex_encodings (ex: .encode_point_color(...)). Mode api=1 works with the simpler .bind(point_color='col_a') form.
End of explanation
"""
g.plot()
"""
Explanation: Colors
Options: default, built-in palette, RGBA, continuous palette, and categorical mapping
Applies to both nodes and edges
Use the .encode_point_color() and .encode_edge_color() calls
For palette and RGBA bindings (non-complex), you can also use the shorthand .bind(point_color='col_a', edge_color='col_b').
Default
Node: Graphistry looks at the local graph structure to auto-color nodes
Edges: Gradient from the src/dst node color to reinforce the node color decision
End of explanation
"""
print(g._nodes['palette_color_int32'].dtype)
g.encode_point_color('palette_color_int32').plot()
"""
Explanation: Built-in palette
Bind an int32 column where values are intepreted by the predefined color palette tabble
End of explanation
"""
print(g._nodes['hex_color_int64'].dtype)
g.encode_point_color('hex_color_int64').plot()
"""
Explanation: RGBA colors
End of explanation
"""
g.encode_point_color('score', palette=['silver', 'maroon', '#FF99FF'], as_continuous=True).plot()
"""
Explanation: Continuous colors
Create a gradient effect by linearly mapping the input column to an evenly-spaced palette.
Great for tasks like mapping timestamps, counts, and scores to low/high and low/medium/high intervals.
End of explanation
"""
g.encode_point_color(
'type',
categorical_mapping={
'mac': '#F99',
'macbook': '#99F'
},
default_mapping='silver'
).plot()
"""
Explanation: Categorical colors
Map distinct values to specific colors. Optionally, set a default, else black.
End of explanation
"""
g.encode_edge_color('time', palette=['blue', 'red'], as_continuous=True).plot()
"""
Explanation: Edge colors
Edge colors work the same as node colors by switching to call .encode_edge_color():
End of explanation
"""
g.encode_point_color(
'type',
categorical_mapping={
'mac': '#F99',
'macbook': '#99F'
},
default_mapping='silver'
).plot()
"""
Explanation: Legend support
Categorical node colors will appear in legend when driven by column type:
End of explanation
"""
|
vascotenner/holoviews | doc/Tutorials/Elements.ipynb | bsd-3-clause | import holoviews as hv
hv.notebook_extension()
hv.Element(None, group='Value', label='Label')
"""
Explanation: Elements are the basic building blocks for any HoloViews visualization. These are the objects that can be composed together using the various Container types.
Here in this overview, we show an example of how to build each of these Elements directly out of Python or Numpy data structures. An even more powerful way to use them is by collecting similar Elements into a HoloMap, as described in Exploring Data, so that you can explore, select, slice, and animate them flexibly, but here we focus on having small, self-contained examples. Complete reference material for each type can be accessed using our documentation system. This tutorial uses the default matplotlib plotting backend; see the Bokeh Elements tutorial for the corresponding bokeh plots.
Element types
This class hierarchy shows each of the Element types.
Each type is named for the default or expected way that the underlying data can be visualized. E.g., if your data is wrapped into a Surface object, it will display as a 3D surface by default, whereas the same data embedded in an Image object will display as a 2D raster image. But please note that the specification and implementation for each Element type does not actually include any such visualization -- the name merely serves as a semantic indication that you ordinarily think of the data as being laid out visually in that way. The actual plotting is done by a separate plotting subsystem, while the objects themselves focus on storing your data and the metadata needed to describe and use it.
This separation of data and visualization is described in detail in the Options tutorial, which describes all about how to find out the options available for each Element type and change them if necessary, from either Python or IPython Notebook. When using this tutorial interactively in an IPython/Jupyter notebook session, we suggest adding %output info=True after the call to notebook_extension below, which will pop up a detailed list and explanation of the available options for visualizing each Element type, after that notebook cell is executed. Then, to find out all the options for any of these Element types, just press <Shift-Enter> on the corresponding cell in the live notebook.
The types available:
<dl class="dl-horizontal">
<dt><a href="#Element"><code>Element</code></a></dt><dd>The base class of all <code>Elements</code>.</dd>
</dl>
<a id='ChartIndex'></a> <a href="#Chart Elements"><code>Charts:</code></a>
<dl class="dl-horizontal">
<dt><a href="#Curve"><code>Curve</code></a></dt><dd>A continuous relation between a dependent and an independent variable.</dd>
<dt><a href="#ErrorBars"><code>ErrorBars</code></a></dt><dd>A collection of x-/y-coordinates with associated error magnitudes.</dd>
<dt><a href="#Spread"><code>Spread</code></a></dt><dd>Continuous version of ErrorBars.</dd>
<dt><a href="#Area"><code>Area</code></a></dt><dd></dd>
<dt><a href="#Bars"><code>Bars</code></a></dt><dd>Data collected and binned into categories.</dd>
<dt><a href="#Histogram"><code>Histogram</code></a></dt><dd>Data collected and binned in a continuous space using specified bin edges.</dd>
<dt><a href="#BoxWhisker"><code>BoxWhisker</code></a></dt><dd>Distributions of data varying by 0-N key dimensions.</dd>
<dt><a href="#Scatter"><code>Scatter</code></a></dt><dd>Discontinuous collection of points indexed over a single dimension.</dd>
<dt><a href="#Points"><code>Points</code></a></dt><dd>Discontinuous collection of points indexed over two dimensions.</dd>
<dt><a href="#VectorField"><code>VectorField</code></a></dt><dd>Cyclic variable (and optional auxiliary data) distributed over two-dimensional space.</dd>
<dt><a href="#Spikes"><code>Spikes</code></a></dt><dd>A collection of horizontal or vertical lines at various locations with fixed height (1D) or variable height (2D).</dd>
<dt><a href="#SideHistogram"><code>SideHistogram</code></a></dt><dd>Histogram binning data contained by some other <code>Element</code>.</dd>
</dl>
<a id='Chart3DIndex'></a> <a href="#Chart3D Elements"><code>Chart3D Elements:</code></a>
<dl class="dl-horizontal">
<dt><a href="#Surface"><code>Surface</code></a></dt><dd>Continuous collection of points in a three-dimensional space.</dd>
<dt><a href="#Scatter3D"><code>Scatter3D</code></a></dt><dd>Discontinuous collection of points in a three-dimensional space.</dd>
<dt><a href="#Trisurface"><code>Trisurface</code></a></dt><dd>Continuous but irregular collection of points interpolated into a Surface using Delaunay triangulation.</dd>
</dl>
<a id='RasterIndex'></a> <a href="#Raster Elements"><code>Raster Elements:</code></a>
<dl class="dl-horizontal">
<dt><a href="#Raster"><code>Raster</code></a></dt><dd>The base class of all rasters containing two-dimensional arrays.</dd>
<dt><a href="#QuadMesh"><code>QuadMesh</code></a></dt><dd>Raster type specifying 2D bins with two-dimensional array of values.</dd>
<dt><a href="#HeatMap"><code>HeatMap</code></a></dt><dd>Raster displaying sparse, discontinuous data collected in a two-dimensional space.</dd>
<dt><a href="#Image"><code>Image</code></a></dt><dd>Raster containing a two-dimensional array covering a continuous space (sliceable).</dd>
<dt><a href="#RGB"><code>RGB</code></a></dt><dd>Image with 3 (R,G,B) or 4 (R,G,B,Alpha) color channels.</dd>
<dt><a href="#HSV"><code>HSV</code></a></dt><dd>Image with 3 (Hue, Saturation, Value) or 4 channels.</dd>
</dl>
<a id='TabularIndex'></a> <a href="#Tabular Elements"><code>Tabular Elements:</code></a>
<dl class="dl-horizontal">
<dt><a href="#ItemTable"><code>ItemTable</code></a></dt><dd>Ordered collection of key-value pairs (ordered dictionary).</dd>
<dt><a href="#Table"><code>Table</code></a></dt><dd>Collection of arbitrary data with arbitrary key and value dimensions.</dd>
</dl>
<a id='AnnotationIndex'></a> <a href="#Annotation Elements"><code>Annotations:</code></a>
<dl class="dl-horizontal">
<dt><a href="#VLine"><code>VLine</code></a></dt><dd>Vertical line annotation.</dd>
<dt><a href="#HLine"><code>HLine</code></a></dt><dd>Horizontal line annotation.</dd>
<dt><a href="#Spline"><code>Spline</code></a></dt><dd>Bezier spline (arbitrary curves).</dd>
<dt><a href="#Text"><code>Text</code></a></dt><dd>Text annotation on an <code>Element</code>.</dd>
<dt><a href="#Arrow"><code>Arrow</code></a></dt><dd>Arrow on an <code>Element</code> with optional text label.</dd>
</dl>
<a id='PathIndex'></a> <a href="#Path Elements"><code>Paths:</code></a>
<dl class="dl-horizontal">
<dt><a href="#Path"><code>Path</code></a></dt><dd>Collection of paths.</dd>
<dt><a href="#Contours"><code>Contours</code></a></dt><dd>Collection of paths, each with an associated value.</dd>
<dt><a href="#Polygons"><code>Polygons</code></a></dt><dd>Collection of filled, closed paths with an associated value.</dd>
<dt><a href="#Bounds"><code>Bounds</code></a></dt><dd>Box specified by corner positions.</dd>
<dt><a href="#Box"><code>Box</code></a></dt><dd>Box specified by center position, radius, and aspect ratio.</dd>
<dt><a href="#Ellipse"><code>Ellipse</code></a></dt><dd>Ellipse specified by center position, radius, and aspect ratio.</dd>
</dl>
Element <a id='Element'></a>
The basic or fundamental types of data that can be visualized.
Element is the base class for all the other HoloViews objects shown in this section.
All Element objects accept data as the first argument to define the contents of that element. In addition to its implicit type, each element object has a group string defining its category, and a label naming this particular item, as described in the Introduction.
When rich display is off, or if no visualization has been defined for that type of Element, the Element is presented with a default textual representation:
End of explanation
"""
import numpy as np
points = [(0.1*i, np.sin(0.1*i)) for i in range(100)]
hv.Curve(points)
"""
Explanation: In addition, Element has key dimensions (kdims), value dimensions (vdims), and constant dimensions (cdims) to describe the semantics of indexing within the Element, the semantics of the underlying data contained by the Element, and any constant parameters associated with the object, respectively.
Dimensions are described in the Introduction.
The remaining Element types each have a rich, graphical display as shown below.
Chart Elements <a id='Chart Elements'></a>
Visualization of a dependent variable against an independent variable
The first large class of Elements is the Chart elements. These objects have at least one fully indexable, sliceable key dimension (typically the x axis in a plot), and usually have one or more value dimension(s) (often the y axis) that may or may not be indexable depending on the implementation. The key dimensions are normally the parameter settings for which things are measured, and the value dimensions are the data points recorded at those settings.
As described in the Columnar Data tutorial, the data can be stored in several different internal formats, such as a NumPy array of shape (N, D), where N is the number of samples and D the number of dimensions. A somewhat larger list of formats can be accepted, including any of the supported internal formats, or
As a list of length N containing tuples of length D.
As a tuple of length D containing iterables of length N.
Curve <a id='Curve'></a>
End of explanation
"""
np.random.seed(7)
points = [(0.1*i, np.sin(0.1*i)) for i in range(100)]
errors = [(0.1*i, np.sin(0.1*i), np.random.rand()/2) for i in np.linspace(0, 100, 11)]
hv.Curve(points) * hv.ErrorBars(errors)
"""
Explanation: A Curve is a set of values provided for some set of keys from a continuously indexable 1D coordinate system, where the plotted values will be connected up because they are assumed to be samples from a continuous relation.
ErrorBars <a id='ErrorBars'></a>
End of explanation
"""
%%opts ErrorBars (capthick=3)
points = [(0.1*i, np.sin(0.1*i)) for i in range(100)]
errors = [(0.1*i, np.sin(0.1*i), np.random.rand()/2, np.random.rand()/4) for i in np.linspace(0, 100, 11)]
hv.Curve(points) * hv.ErrorBars(errors, vdims=['y', 'yerrneg', 'yerrpos'])
"""
Explanation: ErrorBars is a set of x-/y-coordinates with associated error values. Error values may be either symmetric or asymmetric, and thus can be supplied as an Nx3 or Nx4 array (or any of the alternative constructors Chart Elements allow).
End of explanation
"""
np.random.seed(42)
xs = np.linspace(0, np.pi*2, 20)
err = 0.2+np.random.rand(len(xs))
hv.Spread((xs, np.sin(xs), err))
"""
Explanation: Spread <a id='Spread'></a>
Spread elements have the same data format as the ErrorBars element, namely x- and y-values with associated symmetric or assymetric errors, but are interpreted as samples from a continuous distribution (just as Curve is the continuous version of Scatter). These are often paired with an overlaid Curve to show both the mean (as a curve) and the spread of values; see the Columnar Data tutorial for examples.
Symmetric
End of explanation
"""
%%opts Spread (facecolor='indianred' alpha=1)
xs = np.linspace(0, np.pi*2, 20)
hv.Spread((xs, np.sin(xs), 0.1+np.random.rand(len(xs)), 0.1+np.random.rand(len(xs))),
vdims=['y', 'yerrneg', 'yerrpos'])
"""
Explanation: Asymmetric
End of explanation
"""
xs = np.linspace(0, np.pi*4, 40)
hv.Area((xs, np.sin(xs)))
"""
Explanation: Area <a id='Area'></a>
Area under the curve
By default the Area Element draws just the area under the curve, i.e. the region between the curve and the origin.
End of explanation
"""
X = np.linspace(0,3,200)
Y = X**2 + 3
Y2 = np.exp(X) + 2
Y3 = np.cos(X)
hv.Area((X, Y, Y2), vdims=['y', 'y2']) * hv.Area((X, Y, Y3), vdims=['y', 'y3'])
"""
Explanation: Area between curves
When supplied a second value dimension the area is defined as the area between two curves.
End of explanation
"""
data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)]
bars = hv.Bars(data, kdims=[hv.Dimension('Car occupants', values='initial')], vdims=['Count'])
bars + bars[['one', 'two', 'three']]
"""
Explanation: Bars <a id='Bars'></a>
End of explanation
"""
%%opts Bars [color_by=['category', 'stack'] legend_position='top']
from itertools import product
np.random.seed(1)
groups, categories, stacks = ['A', 'B'], ['a', 'b'], ['I', 'II']
keys = product(groups, categories, stacks)
hv.Bars([(k, np.random.rand()*100) for k in keys],
kdims=['Group', 'Category', 'Stack'], vdims=['Count'])
"""
Explanation: Bars is an NdElement type, so by default it is sorted. To preserve the initial ordering specify the Dimension with values set to 'initial', or you can supply an explicit list of valid dimension keys.
Bars support up to three key dimensions which can be laid by 'group', 'category', and 'stack' dimensions. By default the key dimensions are mapped onto the first, second, and third Dimension of the Bars object, but this behavior can be overridden via the group_index, category_index, and stack_index options. You can also style each bar the way you want by creating style groups for any combination of the three dimensions. Here we color_by 'category' and 'stack', so that a given color represents some combination of those two values (according to the key shown).
End of explanation
"""
hv.BoxWhisker(np.random.randn(200), kdims=[], vdims=['Value'])
"""
Explanation: BoxWhisker <a id='BoxWhisker'></a>
The BoxWhisker Element allows representing distributions of data varying by 0-N key dimensions. To represent the distribution of a single variable, we can create a BoxWhisker Element with no key dimensions and a single value dimension:
End of explanation
"""
%%opts BoxWhisker [fig_size=200 invert_axes=True]
style = dict(boxprops=dict(color='gray', linewidth=1), whiskerprops=dict(color='indianred', linewidth=1))
groups = [chr(65+g) for g in np.random.randint(0, 3, 200)]
hv.BoxWhisker((groups, np.random.randint(0, 5, 200), np.random.randn(200)),
kdims=['Group', 'Category'], vdims=['Value'])(style=style).sort()
"""
Explanation: BoxWhisker Elements support any number of dimensions and may also be rotated. To style the boxes and whiskers, supply boxprops, whiskerprops, and flierprops.
End of explanation
"""
points = hv.Points(np.random.randn(500, 2))
points << hv.BoxWhisker(points['y']) << hv.BoxWhisker(points['x'])
"""
Explanation: BoxWhisker Elements may also be used to represent a distribution as a marginal plot by adjoining it using <<.
End of explanation
"""
np.random.seed(1)
data = [np.random.normal() for i in range(10000)]
frequencies, edges = np.histogram(data, 20)
hv.Histogram(frequencies, edges)
"""
Explanation: Histogram <a id='Histogram'></a>
End of explanation
"""
%%opts Histogram [projection='polar' show_grid=True]
data = [np.random.rand()*np.pi*2 for i in range(100)]
frequencies, edges = np.histogram(data, 20)
hv.Histogram(frequencies, edges, kdims=['Angle'])
"""
Explanation: Histograms partition the x axis into discrete (but not necessarily regular) bins, showing counts in each as a bar.
Almost all Element types, including Histogram, may be projected onto a polar axis by supplying projection='polar' as a plot option.
End of explanation
"""
%%opts Scatter (color='k', marker='s', s=50)
np.random.seed(42)
points = [(i, np.random.random()) for i in range(20)]
hv.Scatter(points) + hv.Scatter(points)[12:20]
"""
Explanation: Scatter <a id='Scatter'></a>
End of explanation
"""
np.random.seed(12)
points = np.random.rand(50,2)
hv.Points(points) + hv.Points(points)[0.6:0.8,0.2:0.5]
"""
Explanation: Scatter is the discrete equivalent of Curve, showing y values for discrete x values selected. See Points for more information.
The marker shape specified above can be any supported by matplotlib, e.g. s, d, or o; the other options select the color and size of the marker. For convenience with the bokeh backend, the matplotlib marker options are supported using a compatibility function in HoloViews.
Points <a id='Points'></a>
End of explanation
"""
for o in [hv.Points(points,name="Points "), hv.Scatter(points,name="Scatter")]:
for d in ['key','value']:
print("%s %s_dimensions: %s " % (o.name, d, o.dimensions(d,label=True)))
"""
Explanation: As you can see, Points is very similar to Scatter, and can produce some plots that look identical. However, the two Elements are very different semantically. For Scatter, the dots each show a dependent variable y for some x, such as in the Scatter example above where we selected regularly spaced values of x and then created a random number as the corresponding y. I.e., for Scatter, the y values are the data; the xs are just where the data values are located. For Points, both x and y are independent variables, known as key_dimensions in HoloViews:
End of explanation
"""
%%opts Points [color_index=2 size_index=3 scaling_method="width" scaling_factor=10]
np.random.seed(10)
data = np.random.rand(100,4)
points = hv.Points(data, vdims=['z', 'alpha'])
points + points[0.3:0.7, 0.3:0.7].hist()
"""
Explanation: The Scatter object expresses a dependent relationship between x and y, making it useful for combining with other similar Chart types, while the Points object expresses the relationship of two independent keys x and y with optional vdims (zero in this case), which makes Points objects meaningful to combine with the Raster types below.
Of course, the vdims need not be empty for Points; here is an example with two additional quantities for each point, as value_dimensions z and α visualized as the color and size of the dots, respectively. The point sizes can be tweaked using the option scaling_factor, which determines the amount by which each point width or area is scaled, depending on the value of scaling_method.
End of explanation
"""
%%opts Spikes (alpha=0.4)
xs = np.random.rand(50)
ys = np.random.rand(50)
hv.Points((xs, ys)) * hv.Spikes(xs)
"""
Explanation: Such a plot wouldn't be meaningful for Scatter, but is a valid use for Points, where the x and y locations are independent variables representing coordinates, and the "data" is conveyed by the size and color of the dots.
Spikes <a id='Spikes'></a>
Spikes represent any number of horizontal or vertical line segments with fixed or variable heights. There are a number of disparate uses for this type. First of all, they may be used as a rugplot to give an overview of a one-dimensional distribution. They may also be useful in more domain-specific cases, such as visualizing spike trains for neurophysiology or spectrograms in physics and chemistry applications.
In the simplest case, a Spikes object represents coordinates in a 1D distribution:
End of explanation
"""
%%opts Spikes (cmap='Reds')
hv.Spikes(np.random.rand(20, 2), kdims=['Mass'], vdims=['Intensity'])
"""
Explanation: When supplying two dimensions to the Spikes object, the second dimension will be mapped onto the line height. Optionally, you may also supply a cmap and color_index to map color onto one of the dimensions. This way we can, for example, plot a mass spectrogram:
End of explanation
"""
%%opts Spikes NdOverlay [show_legend=False]
hv.NdOverlay({i: hv.Spikes(np.random.randint(0, 100, 10), kdims=['Time'])(plot=dict(position=0.1*i))
for i in range(10)})(plot=dict(yticks=[((i+1)*0.1-0.05, i) for i in range(10)]))
"""
Explanation: Another possibility is to draw a number of spike trains as you would encounter in neuroscience. Here we generate 10 separate random spike trains and distribute them evenly across the space by setting their position. By also declaring some yticks, each spike train can be labeled individually:
End of explanation
"""
%%opts Spikes (alpha=0.05) [spike_length=1]
points = hv.Points(np.random.randn(500, 2))
points << hv.Spikes(points['y']) << hv.Spikes(points['x'])
"""
Explanation: Finally, we may use Spikes to visualize marginal distributions as adjoined plots using the << adjoin operator:
End of explanation
"""
y,x = np.mgrid[-10:10,-10:10] * 0.25
sine_rings = np.sin(x**2+y**2)*np.pi+np.pi
exp_falloff = 1/np.exp((x**2+y**2)/8)
vector_data = [x,y,sine_rings, exp_falloff]
hv.VectorField(vector_data)
"""
Explanation: VectorField <a id='VectorField'></a>
End of explanation
"""
%%opts VectorField.A [color_dim='angle'] VectorField.M [color_dim='magnitude']
hv.VectorField(vector_data, group='A')
"""
Explanation: As you can see above, here the x and y positions are chosen to make a regular grid. The arrow angles follow a sinusoidal ring pattern, and the arrow lengths fall off exponentially from the center, so this plot has four dimensions of data (direction and length for each x,y position).
Using the IPython %%opts cell-magic (described in the Options tutorial, along with the Python equivalent), we can also use color as a redundant indicator to the direction or magnitude:
End of explanation
"""
n=20
x=np.linspace(1,3,n)
y=np.sin(np.linspace(0,2*np.pi,n))/4
hv.VectorField([x,y,x*5,np.ones(n)]) * hv.VectorField([x,-y,x*5,np.ones(n)])
"""
Explanation: The vector fields above were sampled on a regular grid, but any collection of x,y values is allowed:
End of explanation
"""
import numpy as np
np.random.seed(42)
points = [(i, np.random.normal()) for i in range(800)]
hv.Scatter(points).hist()
"""
Explanation: SideHistogram <a id='SideHistogram'></a>
The .hist method conveniently adjoins a histogram to the side of any Chart, Surface, or Raster component, as well as many of the container types (though it would be reporting data from one of these underlying Element types). For a Raster using color or grayscale to show values (see Raster section below), the side histogram doubles as a color bar or key.
End of explanation
"""
%%opts Surface (cmap='jet' rstride=20, cstride=2)
hv.Surface(np.sin(np.linspace(0,100*np.pi*2,10000)).reshape(100,100))
"""
Explanation: Chart3D Elements <a id='Chart3D Elements'></a>
Surface <a id='Surface'></a>
End of explanation
"""
%%opts Scatter3D [azimuth=40 elevation=20]
y,x = np.mgrid[-5:5, -5:5] * 0.1
heights = np.sin(x**2+y**2)
hv.Scatter3D(zip(x.flat,y.flat,heights.flat))
"""
Explanation: Surface is used for a set of gridded points whose associated value dimension represents samples from a continuous surface; it is the equivalent of a Curve but with two key dimensions instead of just one.
Scatter3D <a id='Scatter3D'></a>
End of explanation
"""
%%opts Trisurface [fig_size=200] (cmap='hot_r')
hv.Trisurface((x.flat,y.flat,heights.flat))
"""
Explanation: Scatter3D is the equivalent of Scatter but for two key dimensions, rather than just one.
Trisurface <a id='Trisurface'></a>
The Trisurface Element renders any collection of 3D points as a Surface by applying Delaunay triangulation. It thus supports arbitrary, non-gridded data, but it does not support indexing to find data values, since finding the closest ones would require a search.
End of explanation
"""
x,y = np.meshgrid(np.linspace(-5,5,101), np.linspace(5,-5,101))
"""
Explanation: Raster Elements <a id='Raster Elements'></a>
A collection of raster image types
The second large class of Elements is the raster elements. Like Points and unlike the other Chart elements, Raster Elements live in a 2D key-dimensions space. For the Image, RGB, and HSV elements, the coordinates of this two-dimensional key space are defined in a continuously indexable coordinate system. We can use np.meshgrid to define the appropriate sampling along the x and y dimensions:
End of explanation
"""
hv.Raster(np.sin(x**2+y**2))
"""
Explanation: Raster <a id='Raster'></a>
A Raster is the base class for image-like Elements, but may be used directly to visualize 2D arrays using a color map. The coordinate system of a Raster is the raw indexes of the underlying array, with integer values always starting from (0,0) in the top left, with default extents corresponding to the shape of the array. The Image subclass visualizes similarly, but using a continuous Cartesian coordinate system suitable for an array that represents some underlying continuous region.
End of explanation
"""
n = 21
xs = np.logspace(1, 3, n)
ys = np.linspace(1, 10, n)
hv.QuadMesh((xs, ys, np.random.rand(n-1, n-1)))
"""
Explanation: QuadMesh <a id='QuadMesh'></a>
The basic QuadMesh is a 2D grid of bins specified as x-/y-values specifying a regular sampling or edges, with arbitrary sampling and an associated 2D array containing the bin values. The coordinate system of a QuadMesh is defined by the bin edges, therefore any index falling into a binned region will return the appropriate value. Unlike Image objects, slices must be inclusive of the bin edges.
End of explanation
"""
coords = np.linspace(-1.5,1.5,n)
X,Y = np.meshgrid(coords, coords);
Qx = np.cos(Y) - np.cos(X)
Qz = np.sin(Y) + np.sin(X)
Z = np.sqrt(X**2 + Y**2)
hv.QuadMesh((Qx, Qz, Z))
"""
Explanation: QuadMesh may also be used to represent an arbitrary mesh of quadrilaterals by supplying three separate 2D arrays representing the coordinates of each quadrilateral in a 2D space. Note that when using QuadMesh in this mode, slicing and indexing semantics and most operations will currently not work.
End of explanation
"""
data = {(chr(65+i),chr(97+j)): i*j for i in range(5) for j in range(5) if i!=j}
hv.HeatMap(data).sort()
"""
Explanation: HeatMap <a id='HeatMap'></a>
A HeatMap displays like a typical raster image, but the input is a dictionary indexed with two-dimensional keys, not a Numpy array or Pandas dataframe. As many rows and columns as required will be created to display the values in an appropriate grid format. Values unspecified are left blank, and the keys can be any Python datatype (not necessarily numeric). One typical usage is to show values from a set of experiments, such as a parameter space exploration, and many other such visualizations are shown in the Containers and Exploring Data tutorials. Each value in a HeatMap is labeled explicitly by default, and so this component is not meant for very large numbers of samples. With the default color map, high values (in the upper half of the range present) are colored orange and red, while low values (in the lower half of the range present) are colored shades of blue.
End of explanation
"""
bounds=(-2,-3,5,2)   # Coordinate system: (left, bottom, right, top)
xs,ys = np.meshgrid(np.linspace(-2,5,50), np.linspace(2,-3, 30))
(hv.Image(np.sin(xs)+ys, bounds=bounds)
+ hv.Image(np.sin(xs)+ys, bounds=bounds)[0:3, -2.5:2])
"""
Explanation: Image <a id='Image'></a>
Like Raster, a HoloViews Image allows you to view 2D arrays using an arbitrary color map. Unlike Raster, an Image is associated with a 2D coordinate system in continuous space, which is appropriate for values sampled from some underlying continuous distribution (as in a photograph or other measurements from locations in real space). Slicing, sampling, etc. on an Image all use this continuous space, whereas the corresponding operations on a Raster work on the raw array coordinates.
To make the coordinate system clear, we'll define two arrays called xs and ys with a non-square aspect and map them through a simple function that illustrate how these inputs relate to the coordinate system:
End of explanation
"""
r = 0.5*np.sin(np.pi +3*x**2+y**2)+0.5
g = 0.5*np.sin(x**2+2*y**2)+0.5
b = 0.5*np.sin(np.pi/2+x**2+y**2)+0.5
hv.RGB(np.dstack([r,g,b]))
"""
Explanation: Notice how, because our declared coordinate system is continuous, we can slice with any floating-point value we choose. The appropriate range of the samples in the input numpy array will always be displayed, whether or not there are samples at those specific floating-point values.
It is also worth noting that the name Image can clash with other common libraries, which is one reason to avoid unqualified imports like from holoviews import *. For instance, the Python Imaging Library provides an Image module, and IPython itself supplies an Image class in IPython.display. Python namespaces allow you to avoid such problems, e.g. using from PIL import Image as PILImage or using import holoviews as hv and then hv.Image(), as we do in these tutorials.
RGB <a id='RGB'></a>
The RGB element is an Image that supports red, green, blue channels:
End of explanation
"""
%%opts Image (cmap='gray')
hv.Image(r,label="R") + hv.Image(g,label="G") + hv.Image(b,label="B")
"""
Explanation: You can see how the RGB object is created from the original channels:
End of explanation
"""
%%opts Image (cmap='gray')
mask = 0.5*np.sin(0.2*(x**2+y**2))+0.5
rgba = hv.RGB(np.dstack([r,g,b,mask]))
bg = hv.Image(0.5*np.cos(x*3)+0.5, label="Background") * hv.VLine(x=0,label="Background")
overlay = bg*rgba
overlay.label="RGBA Overlay"
bg + hv.Image(mask,label="Mask") + overlay
"""
Explanation: RGB also supports an optional alpha channel, which will be used as a mask revealing or hiding any Elements it is overlaid on top of:
End of explanation
"""
h = 0.5 + np.sin(0.2*(x**2+y**2)) / 2.0
s = 0.5*np.cos(x*3)+0.5
v = 0.5*np.cos(y*3)+0.5
hv.HSV(np.dstack([h, s, v]))
"""
Explanation: HSV <a id='HSV'></a>
HoloViews makes it trivial to work in any color space that can be converted to RGB by making a simple subclass of RGB as appropriate. For instance, we also provide the HSV (hue, saturation, value) color space, which is useful for plotting cyclic data (as the Hue) along with two additional dimensions (controlling the saturation and value of the color, respectively):
End of explanation
"""
%%opts Image (cmap='gray')
hv.Image(h, label="H") + hv.Image(s, label="S") + hv.Image(v, label="V")
"""
Explanation: You can see how this is created from the original channels:
End of explanation
"""
hv.ItemTable([('Age', 10), ('Weight',15), ('Height','0.8 meters')])
"""
Explanation: Tabular Elements <a id='Tabular Elements'></a>
General data structures for holding arbitrary information
ItemTable <a id='ItemTable'></a>
An ItemTable is an ordered collection of key, value pairs. It can be used to directly visualize items in a tabular format where the items may be supplied as an OrderedDict or a list of (key,value) pairs. A standard Python dictionary can be easily visualized using a call to the .items() method, though the entries in such a dictionary are not kept in any particular order, and so you may wish to sort them before display. One typical usage for an ItemTable is to list parameter values or measurements associated with an adjacent Element.
End of explanation
"""
keys = [('M',10), ('M',16), ('F',12)]
values = [(15, 0.8), (18, 0.6), (10, 0.8)]
table = hv.Table(zip(keys,values),
kdims = ['Gender', 'Age'],
vdims=['Weight', 'Height'])
table
"""
Explanation: Table <a id='Table'></a>
A table is more general than an ItemTable, as it allows multi-dimensional keys and multidimensional values.
End of explanation
"""
table.select(Gender='M') + table.select(Gender='M', Age=10)
"""
Explanation: Note that you can use select using tables, and once you select using a full, multidimensional key, you get an ItemTable (shown on the right):
End of explanation
"""
table.select(Gender='M').to.curve(kdims=["Age"], vdims=["Weight"])
"""
Explanation: The Table is used as a common data structure that may be converted to any other HoloViews data structure using the TableConversion class.
The functionality of the TableConversion class may be conveniently accessed using the .to property. For more extended usage of table conversion see the Columnar Data and Pandas Conversion Tutorials.
End of explanation
"""
scene = hv.RGB.load_image('../assets/penguins.png')
"""
Explanation: Annotation Elements <a id='Annotation Elements'></a>
Useful information that can be overlaid onto other components
Annotations are components designed to be overlaid on top of other Element objects. To demonstrate annotation and paths, we will be drawing many of our elements on top of an RGB Image:
End of explanation
"""
scene * hv.VLine(-0.05) + scene * hv.HLine(-0.05)
"""
Explanation: VLine and HLine <a id='VLine'></a><a id='HLine'></a>
End of explanation
"""
points = [(-0.3, -0.3), (0,0), (0.25, -0.25), (0.3, 0.3)]
codes = [1,4,4,4]
scene * hv.Spline((points,codes)) * hv.Curve(points)
"""
Explanation: Spline <a id='Spline'></a>
The Spline annotation is used to draw Bezier splines using the same semantics as matplotlib splines. In the overlay below, the spline is in dark blue and the control points are in light blue.
End of explanation
"""
scene * hv.Text(0, 0.2, 'Adult\npenguins') + scene * hv.Arrow(0,-0.1, 'Baby penguin', 'v')
"""
Explanation: Text and Arrow <a id='Text'></a><a id='Arrow'></a>
End of explanation
"""
angle = np.linspace(0, 2*np.pi, 100)
baby = list(zip(0.15*np.sin(angle), 0.2*np.cos(angle)-0.2))
adultR = [(0.25, 0.45), (0.35,0.35), (0.25, 0.25), (0.15, 0.35), (0.25, 0.45)]
adultL = [(-0.3, 0.4), (-0.3, 0.3), (-0.2, 0.3), (-0.2, 0.4),(-0.3, 0.4)]
scene * hv.Path([adultL, adultR, baby]) * hv.Path([baby])
"""
Explanation: Paths <a id='Path Elements'></a>
Line-based components that can be overlaid onto other components
Paths are a subclass of annotations that involve drawing line-based components on top of other elements. Internally, Path Element types hold a list of Nx2 arrays, specifying the x/y-coordinates along each path. The data may be supplied in a number of ways, including:
A list of Nx2 numpy arrays.
A list of lists containing x/y coordinate tuples.
A tuple containing an array of length N with the x-values and a second array of shape NxP, where P is the number of paths.
A list of tuples each containing separate x and y values.
Path <a id='Path'></a>
A Path object is actually a collection of paths which can be arbitrarily specified. Although there may be multiple unconnected paths in a single Path object, they will all share the same style. Only by overlaying multiple Path objects do you iterate through the defined color cycle (or any other style options that have been defined).
End of explanation
"""
def circle(radius, x=0, y=0):
    """Return a (100, 2) array of points tracing a circle of the given radius centred at (x, y)."""
    theta = np.linspace(0, 2*np.pi, 100)
    xs = x + radius*np.sin(theta)
    ys = y + radius*np.cos(theta)
    return np.array(list(zip(xs, ys)))
hv.Image(np.sin(x**2+y**2)) * hv.Contours([circle(0.22)], level=0) * hv.Contours([circle(0.33)], level=1)
"""
Explanation: Contours <a id='Contours'></a>
A Contours object is similar to Path object except each of the path elements is associated with a numeric value, called the level. Sadly, our penguins are too complicated to give a simple example so instead we will simply mark the first couple of rings of our earlier ring pattern:
End of explanation
"""
%%opts Polygons (cmap='hot' edgecolor='k' linewidth=2)
np.random.seed(35)
hv.Polygons([np.random.rand(4,2)], level=0.5) *\
hv.Polygons([np.random.rand(4,2)], level=1.0) *\
hv.Polygons([np.random.rand(4,2)], level=1.5) *\
hv.Polygons([np.random.rand(4,2)], level=2.0)
"""
Explanation: Polygons <a id='Polygons'></a>
A Polygons object is similar to a Contours object except that each supplied path is closed and filled. Just like Contours, optionally a level may be supplied; the Polygons will then be colored according to the supplied cmap. Non-finite values such as np.NaN or np.inf will default to the supplied facecolor.
Polygons with values can be used to build heatmaps with arbitrary shapes.
End of explanation
"""
def rectangle(x=0, y=0, width=1, height=1):
    """Return the four corners of an axis-aligned rectangle, counter-clockwise from (x, y)."""
    corners = [(x, y),
               (x + width, y),
               (x + width, y + height),
               (x, y + height)]
    return np.array(corners)
(hv.Polygons([rectangle(width=2), rectangle(x=6, width=2)])(style={'facecolor': '#a50d0d'})
* hv.Polygons([rectangle(x=2, height=2), rectangle(x=5, height=2)])(style={'facecolor': '#ffcc00'})
* hv.Polygons([rectangle(x=3, height=2, width=2)])(style={'facecolor': 'c', 'hatch':'x'}))
"""
Explanation: Polygons without a value are useful as annotation, but also allow us to draw arbitrary shapes.
End of explanation
"""
scene * hv.Bounds(0.2) * hv.Bounds((0.45, 0.45, 0.2, 0.2))
"""
Explanation: Bounds <a id='Bounds'></a>
A bounds is a rectangular area specified as a tuple in (left, bottom, right, top) format. It is useful for denoting a region of interest defined by some bounds, whereas Box (below) is useful for drawing a box at a specific location.
End of explanation
"""
scene * hv.Box( -0.25, 0.3, 0.3, aspect=0.5) * hv.Box( 0, -0.2, 0.1) + \
scene * hv.Ellipse(-0.25, 0.3, 0.3, aspect=0.5) * hv.Ellipse(0, -0.2, 0.1)
"""
Explanation: Box <a id='Box'></a> and Ellipse <a id='Ellipse'></a>
A Box is similar to a Bounds except you specify the box position, width, and aspect ratio instead of the coordinates of the box corners. An Ellipse is specified just as for Box, but has a rounded shape.
End of explanation
"""
|
mercye/foundations-homework | 11/Homework_11_Emelike.ipynb | mit | # checks data type of each value in series Plate ID by printing if type does not equal string
# all values are strings
for x in df['Plate ID']:
if type(x) != str:
print(type(x))
"""
Explanation: 1. I want to make sure my Plate ID is a string. Can't lose the leading zeroes!
End of explanation
"""
df['Vehicle Year'] = df['Vehicle Year'].replace(0, np.nan)
df['Vehicle Year'] = pd.to_datetime(df['Vehicle Year'], format='%Y', errors = 'coerce')
"""
Explanation: 2. I don't think anyone's car was built in 0AD. Discard the '0's as NaN.
End of explanation
"""
# see where I read in csv for inclusion of parse_date arg
"""
Explanation: 3. I want the dates to be dates! Read the read_csv documentation to find out how to make pandas automatically parse dates.
End of explanation
"""
df['Date First Observed'] = df['Date First Observed'].apply(lambda x: pd.to_datetime(str(x), format='%Y%m%d', errors='coerce'))
"""
Explanation: 4. "Date first observed" is a pretty weird column, but it seems like it has a date hiding inside. Using a function with .apply, transform the string (e.g. "20140324") into a Python date. Make the 0's show up as NaN.
End of explanation
"""
def remove_char(x):
    """Return x with its final character dropped (strips the trailing A/P meridiem flag)."""
    return x[:-1]
def insert_char(x):
    """Return x with a colon inserted after the first two characters (e.g. '0423' -> '04:23')."""
    head, tail = x[:2], x[2:]
    return head + ':' + tail
# removes 'A' and 'P' from original values in Violation Time
df['Violation Time2'] = df['Violation Time'].apply(lambda x: remove_char(str(x)))
# inserts colon in to string
df['Violation Time3'] = df['Violation Time2'].apply(lambda x: insert_char(str(x)))
#makes values datetime format, allowing .to_datetime to infer format from string
df['Violation Time4'] = df['Violation Time3'].apply(lambda x: pd.to_datetime(str(x), infer_datetime_format=True, errors='coerce'))
#selects only time from timestamp
df['Violation Time'] = df['Violation Time4'].dt.time
# deletes temporary variables used to make violation time a time
df=df.drop(df.columns[[43, 44, 45]], axis=1)
df.columns
"""
Explanation: 5. "Violation time" is... not a time. Make it a time.
End of explanation
"""
def clean_color(x):
    """Normalize an abbreviated vehicle-color code to its full color name.

    Takes a raw value from the 'Vehicle Color' column and returns the
    canonical color name for known abbreviations; any value not in the
    lookup table is returned unchanged. For each substitution the old and
    new values are printed (matching the original audit-trail behavior).

    Bug fixed vs. the original if/elif chain: the 'TN' branch called
    x.replace('GY', 'TAN'), a no-op on the string 'TN', so tan cars were
    never normalized. 'TN' now correctly maps to 'TAN'.
    """
    # Abbreviation -> canonical name. 'BURGA' -> 'BURGANDY' deliberately
    # preserves the original (mis)spelling so downstream value counts stay
    # consistent with earlier runs of this notebook.
    color_map = {
        'BLK': 'BLACK', 'BK': 'BLACK',
        'W': 'WHITE', 'WH': 'WHITE', 'WHT': 'WHITE', 'WT': 'WHITE',
        'GY': 'GREY', 'GRAY': 'GREY', 'GRY': 'GREY', 'G/Y': 'GREY',
        'BR': 'BROWN', 'BRW': 'BROWN',
        'TN': 'TAN',
        'RD': 'RED',
        'BL': 'BLUE', 'BLU': 'BLUE',
        'SIL': 'SILVER', 'SILVE': 'SILVER',
        'BURGA': 'BURGANDY',
        'PURPL': 'PURPLE', 'PUR': 'PURPLE',
        'YELLO': 'YELLOW', 'YW': 'YELLOW',
        'OR': 'ORANGE',
        'GL': 'GOLD',
        'GR': 'GREEN', 'GRN': 'GREEN', 'G': 'GREEN',
    }
    if x in color_map:
        print(x)
        x = color_map[x]
        print('Now', x)
    return x
df['Vehicle Color'] = df['Vehicle Color'].apply(lambda x: clean_color(x))
df['Vehicle Color']
#check for remaining color abbreviations
for x in df['Vehicle Color']:
if len(str(x)) <= 2:
print(x)
"""
Explanation: 6. There sure are a lot of colors of cars, too bad so many of them are the same. Make "BLK" and "BLACK", "WT" and "WHITE", and any other combinations that you notice.
End of explanation
"""
df2 = pd.read_csv('DOF_Parking_Violation_Codes.csv')
df2
# change format to string to match violation codes data set format
df['Violation Code'] = df['Violation Code'].apply(lambda x: str(x).strip())
# check formatting and string length
for x in df['Violation Code']:
if type(x) != str:
print(type(x))
if len(x) > 2:
print(x)
df3 = pd.merge(df, df2, left_on='Violation Code', right_on='CODE', how='left', indicator=True)
# unsuccessful merges bc Violation Code 41 does not exist in Violation codes data.
# and bc 38 in documented at 37-38 in Violation codes data
df3[df3['_merge']=='left_only']
"""
Explanation: 7. Join the data with the Parking Violations Code dataset from the NYC Open Data site.
End of explanation
"""
# removes unsuccessful merges
df4 = df3[df3['_merge'] !='left_only']
# Create boolean variable to indicate 0/1: Parking violation
df4['Parking Violation'] = df4['DEFINITION'].str.contains('[Pp]arking') | df4['DEFINITION'].str.contains('[Pp]arked')
df4['Parking Violation'].value_counts()
df4[df4['Parking Violation'] == False]
df4['Street Name'].value_counts()
#create empty list
above_96_lst=[]
#create bool object
for x in df4['Street Name']:
if re.search(r'\S \d \S\w ', str(x)):
above_96 = False
above_96_lst.append(above_96)
elif re.search(r'\d[[Aa]-[Zz]][[Aa]-[Zz]] ', str(x)):
above_96 = False
above_96_lst.append(above_96)
# print(x, above_96)
elif re.search(r'\w\s\d\d\s', str(x)):
above_96 = False
above_96_lst.append(above_96)
# print(x, above_96)
elif re.search(r'[A-Z]\s\d\d[A-Z][A-Z] [A-Z][A-Z]', str(x)):
above_96 = False
above_96_lst.append(above_96)
# print(x, above_96)
elif re.search(r'[0-2][A-Z][A-Z] AVE', str(x)):
above_96 = False
above_96_lst.append(above_96)
# print(x, above_96)
else:
above_96 = True
above_96_lst.append(above_96)
# print(x, above_96)
# above_96_lst.count(False)
df4['Above_96'] = above_96_lst
# rename column so easier to reference
df4.rename(columns={"Manhattan\xa0 96th St. & below": "Manhattan_Below_96"}, inplace=True)
df4[['Violation Code', 'Street Name', 'Above_96', 'Manhattan_Below_96', 'All Other Areas', 'Parking Violation']].head(20)
# remove dollar signs
df4['Manhattan_Below_96'] = df4['Manhattan_Below_96'].str.replace('$', '')
# replace long descriptions of cost with only the amount
# df4['Manhattan_Below_96'] = df4['Manhattan_Below_96'].str.replace('100\n(Regular Tow, plus violation fine)', '100')
df4.ix[df4['Manhattan_Below_96'] == '100\n(Regular Tow, plus violation fine)', 'Manhattan_Below_96'] = '100'
# replace 'vary' with zero
df4['Manhattan_Below_96'] = df4['Manhattan_Below_96'].str.replace('vary', '0')
# change to int
df4['Manhattan_Below_96'].apply(lambda x: int(x))
df4['All Other Areas'] = df4['All Other Areas'].str.replace('$', '')
df4['All Other Areas'] = df4['All Other Areas'].str.replace('vary', '0')
df4.ix[df4['All Other Areas'] == '200 (Heavy Tow, plus violation fine)', 'All Other Areas'] = '200'
df4['All Other Areas'].apply(lambda x: int(x))
"""
Explanation: 8. How much money did NYC make off of parking violations?
End of explanation
"""
|
calico/basenji | tutorials/sat_mut.ipynb | apache-2.0 | if not os.path.isfile('data/hg19.ml.fa'):
subprocess.call('curl -o data/hg19.ml.fa https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa', shell=True)
subprocess.call('curl -o data/hg19.ml.fa.fai https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa.fai', shell=True)
if not os.path.isdir('models/heart'):
os.mkdir('models/heart')
if not os.path.isfile('models/heart/model_best.h5'):
subprocess.call('curl -o models/heart/model_best.h5 https://storage.googleapis.com/basenji_tutorial_data/model_best.h5', shell=True)
lines = [['index','identifier','file','clip','sum_stat','description']]
lines.append(['0', 'CNhs11760', 'data/CNhs11760.bw', '384', 'sum', 'aorta'])
lines.append(['1', 'CNhs12843', 'data/CNhs12843.bw', '384', 'sum', 'artery'])
lines.append(['2', 'CNhs12856', 'data/CNhs12856.bw', '384', 'sum', 'pulmonic_valve'])
samples_out = open('data/heart_wigs.txt', 'w')
for line in lines:
print('\t'.join(line), file=samples_out)
samples_out.close()
"""
Explanation: Precursors
End of explanation
"""
! basenji_sat_bed.py -f data/hg19.ml.fa -l 200 -o output/gata4_sat --rc -t data/heart_wigs.txt models/params_small.json models/heart/model_best.h5 data/gata4.bed
"""
Explanation: Compute scores
Saturation mutagenesis is a powerful tool both for dissecting a specific sequence of interest and understanding what the model learned. basenji_sat_bed.py enables this analysis from a test set of data. basenji_sat_vcf.py lets you provide a VCF file for variant-centered mutagenesis.
To do this, you'll need
* Trained model
* BED file
First, you can either train your own model in the Train/test tutorial or use one that I pre-trained from the models subdirectory.
We'll bash the GATA4 promoter to see what motifs drive its expression. I placed a BED file surrounding the GATA4 TSS in data/gata4.bed, so we'll use basenji_sat_bed.py.
The most relevant options are:
| Option/Argument | Value | Note |
|:---|:---|:---|
| -f | data/hg19.ml.fa | Genome FASTA to extract sequences. |
| -l | 200 | Saturation mutagenesis region in the center of the given sequence(s) |
| -o | gata4_sat | Outplot plot directory. |
| --rc | True | Predict forward and reverse complement versions and average the results. |
| -t | data/heart_wigs.txt | Target indexes to analyze. |
| params_file | models/params_small.json | JSON specified parameters to setup the model architecture and optimization parameters. |
| model_file | models/heart/model_best.h5 | Trained saved model parameters. |
| input_file | data/gata4.bed | BED regions. |
End of explanation
"""
! basenji_sat_plot.py --png -l 200 -o output/gata4_sat/plots -t data/heart_wigs.txt output/gata4_sat/scores.h5
! ls output/gata4_sat/plots
"""
Explanation: Plot
The saturation mutagenesis scores go into output/gata4_sat/scores.h5. Then we can use basenji_sat_plot.py to visualize the scores.
The most relevant options are:
| Option/Argument | Value | Note |
|:---|:---|:---|
| -g | True | Draw a sequence logo for the gain score, too, identifying repressor motifs. |
| -l | 200 | Saturation mutagenesis region in the center of the given sequence(s) |
| -o | output/gata4_sat/plots | Outplot plot directory. |
| -t | data/heart_wigs.txt | Target indexes to analyze. |
| scores_file | output/gata4_sat/scores.h5 | Scores HDF5 from above. |
End of explanation
"""
IFrame('output/gata4_sat/plots/seq0_t0.png', width=1200, height=400)
"""
Explanation: The resulting plots reveal a low level of activity, with a GC-rich motif driving the only signal.
End of explanation
"""
|
AllenDowney/ModSimPy | notebooks/chap12.ipynb | mit | # Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
"""
Explanation: Modeling and Simulation in Python
Chapter 12
Copyright 2017 Allen Downey
License: Creative Commons Attribution 4.0 International
End of explanation
"""
def make_system(beta, gamma):
    """Build a System object for the SIR model.

    beta: contact rate (per day)
    gamma: recovery rate (per day)

    returns: System object with a normalized initial state
    """
    # One infected student out of 90, expressed as population fractions.
    init = State(S=89, I=1, R=0)
    init = init / sum(init)

    # Simulate one semester: 14 weeks of 7 days each.
    return System(init=init,
                  t0=0,
                  t_end=14 * 7,
                  beta=beta,
                  gamma=gamma)
def update_func(state, t, system):
    """Compute the SIR state after one time step.

    state: State with variables S, I, R
    t: time step (unused here; kept for the simulation interface)
    system: System with beta and gamma

    returns: new State object
    """
    susceptible, infectious, recovered = state

    # New infections are driven by contacts between S and I;
    # recoveries are proportional to the infectious fraction.
    new_infections = system.beta * infectious * susceptible
    new_recoveries = system.gamma * infectious

    return State(S=susceptible - new_infections,
                 I=infectious + new_infections - new_recoveries,
                 R=recovered + new_recoveries)
def run_simulation(system, update_func):
    """Run a simulation of the system.

    system: System object with init, t0 and t_end
    update_func: function that computes the next state

    returns: TimeFrame with one row per time step
    """
    results = TimeFrame(columns=system.init.index)
    results.row[system.t0] = system.init

    # Advance from t0 to t_end, writing each new state into the next row.
    for step in linrange(system.t0, system.t_end):
        current = results.row[step]
        results.row[step + 1] = update_func(current, step, system)

    return results
"""
Explanation: Code
Here's the code from the previous notebook that we'll need.
End of explanation
"""
def calc_total_infected(results):
    """Fraction of the population infected during the simulation.

    results: DataFrame with columns S, I, R

    returns: fraction of population (total drop in susceptibles)
    """
    # Everyone who leaves S was infected at some point.
    initial_s = get_first_value(results.S)
    final_s = get_last_value(results.S)
    return initial_s - final_s
"""
Explanation: Metrics
Given the results, we can compute metrics that quantify whatever we are interested in, like the total number of sick students, for example.
End of explanation
"""
beta = 0.333
gamma = 0.25
system = make_system(beta, gamma)
results = run_simulation(system, update_func)
print(beta, gamma, calc_total_infected(results))
"""
Explanation: Here's an example.
End of explanation
"""
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Exercise: Write functions that take a TimeFrame object as a parameter and compute the other metrics mentioned in the book:
The fraction of students who are sick at the peak of the outbreak.
The day the outbreak peaks.
The fraction of students who are sick at the end of the semester.
Note: Not all of these functions require the System object, but when you write a set of related functons, it is often convenient if they all take the same parameters.
Hint: If you have a TimeSeries called I, you can compute the largest value of the series like this:
I.max()
And the index of the largest value like this:
I.idxmax()
You can read about these functions in the Series documentation.
End of explanation
"""
def add_immunization(system, fraction):
    """Immunize a fraction of the population.

    Moves the given fraction directly from S to R before the
    simulation starts.

    system: System object
    fraction: number from 0 to 1
    """
    init = system.init
    init.S = init.S - fraction
    init.R = init.R + fraction
"""
Explanation: What if?
We can use this model to evaluate "what if" scenarios. For example, this function models the effect of immunization by moving some fraction of the population from S to R before the simulation starts.
End of explanation
"""
tc = 3 # time between contacts in days
tr = 4 # recovery time in days
beta = 1 / tc # contact rate in per day
gamma = 1 / tr # recovery rate in per day
system = make_system(beta, gamma)
"""
Explanation: Let's start again with the system we used in the previous sections.
End of explanation
"""
results = run_simulation(system, update_func)
calc_total_infected(results)
"""
Explanation: And run the model without immunization.
End of explanation
"""
system2 = make_system(beta, gamma)
add_immunization(system2, 0.1)
results2 = run_simulation(system2, update_func)
calc_total_infected(results2)
"""
Explanation: Now with 10% immunization.
End of explanation
"""
plot(results.S, '-', label='No immunization')
plot(results2.S, '--', label='10% immunization')
decorate(xlabel='Time (days)',
ylabel='Fraction susceptible')
savefig('figs/chap12-fig01.pdf')
"""
Explanation: 10% immunization leads to a drop in infections of 16 percentage points.
Here's what the time series looks like for S, with and without immunization.
End of explanation
"""
immunize_array = linspace(0, 1, 11)
for fraction in immunize_array:
system = make_system(beta, gamma)
add_immunization(system, fraction)
results = run_simulation(system, update_func)
print(fraction, calc_total_infected(results))
"""
Explanation: Now we can sweep through a range of values for the fraction of the population who are immunized.
End of explanation
"""
def sweep_immunity(immunize_array):
    """Sweep a range of immunization fractions.

    immunize_array: array of fractions immunized (0 to 1)

    returns: SweepSeries mapping fraction -> total fraction infected
    """
    infected = SweepSeries()
    # beta and gamma are taken from the enclosing notebook scope.
    for immunized in immunize_array:
        sim = make_system(beta, gamma)
        add_immunization(sim, immunized)
        frame = run_simulation(sim, update_func)
        infected[immunized] = calc_total_infected(frame)
    return infected
"""
Explanation: This function does the same thing and stores the results in a Sweep object.
End of explanation
"""
immunize_array = linspace(0, 1, 21)
infected_sweep = sweep_immunity(immunize_array)
"""
Explanation: Here's how we run it.
End of explanation
"""
plot(infected_sweep)
decorate(xlabel='Fraction immunized',
ylabel='Total fraction infected',
title='Fraction infected vs. immunization rate',
legend=False)
savefig('figs/chap12-fig02.pdf')
"""
Explanation: And here's what the results look like.
End of explanation
"""
def logistic(x, A=0, B=1, C=1, M=0, K=1, Q=1, nu=1):
    """Compute the generalized logistic function.

    A: controls the lower bound
    B: controls the steepness of the transition
    C: not all that useful, AFAIK
    M: controls the location of the transition
    K: controls the upper bound
    Q: shifts the transition left or right
    nu: affects the symmetry of the transition

    returns: float or array
    """
    denominator = (C + Q * exp(-B * (x - M))) ** (1 / nu)
    return A + (K - A) / denominator
"""
Explanation: If 40% of the population is immunized, less than 4% of the population gets sick.
Logistic function
To model the effect of a hand-washing campaign, I'll use a generalized logistic function (GLF), which is a convenient function for modeling curves that have a generally sigmoid shape. The parameters of the GLF correspond to various features of the curve in a way that makes it easy to find a function that has the shape you want, based on data or background information about the scenario.
End of explanation
"""
spending = linspace(0, 1200, 21)
"""
Explanation: The following array represents the range of possible spending.
End of explanation
"""
def compute_factor(spending):
    """Reduction factor in beta as a function of campaign spending.

    spending: dollars from 0 to 1200

    returns: fractional reduction in beta
    """
    # M puts the transition near $500, K caps the reduction at 20%,
    # and B controls how gradual the transition is.
    return logistic(spending, M=500, K=0.2, B=0.01)
"""
Explanation: compute_factor computes the reduction in beta for a given level of campaign spending.
M is chosen so the transition happens around \$500.
K is the maximum reduction in beta, 20%.
B is chosen by trial and error to yield a curve that seems feasible.
End of explanation
"""
percent_reduction = compute_factor(spending) * 100
plot(spending, percent_reduction)
decorate(xlabel='Hand-washing campaign spending (USD)',
ylabel='Percent reduction in infection rate',
title='Effect of hand washing on infection rate',
legend=False)
"""
Explanation: Here's what it looks like.
End of explanation
"""
def add_hand_washing(system, spending):
    """Modify the system to model the effect of hand washing.

    system: System object
    spending: campaign spending in USD
    """
    # Scale the contact rate down by the spending-dependent factor.
    reduction = compute_factor(spending)
    system.beta = system.beta * (1 - reduction)
"""
Explanation: Exercise: Modify the parameters M, K, and B, and see what effect they have on the shape of the curve. Read about the generalized logistic function on Wikipedia. Modify the other parameters and see what effect they have.
Hand washing
Now we can model the effect of a hand-washing campaign by modifying beta
End of explanation
"""
tc = 3 # time between contacts in days
tr = 4 # recovery time in days
beta = 1 / tc # contact rate in per day
gamma = 1 / tr # recovery rate in per day
beta, gamma
"""
Explanation: Let's start with the same values of beta and gamma we've been using.
End of explanation
"""
spending_array = linspace(0, 1200, 13)
for spending in spending_array:
system = make_system(beta, gamma)
add_hand_washing(system, spending)
results = run_simulation(system, update_func)
print(spending, system.beta, calc_total_infected(results))
"""
Explanation: Now we can sweep different levels of campaign spending.
End of explanation
"""
def sweep_hand_washing(spending_array):
    """Run simulations over a range of campaign spending levels.

    spending_array: array of dollars from 0 to 1200

    returns: SweepSeries mapping spending -> total fraction infected
    """
    infected = SweepSeries()
    # beta and gamma are taken from the enclosing notebook scope.
    for dollars in spending_array:
        sim = make_system(beta, gamma)
        add_hand_washing(sim, dollars)
        frame = run_simulation(sim, update_func)
        infected[dollars] = calc_total_infected(frame)
    return infected
"""
Explanation: Here's a function that sweeps a range of spending and stores the results in a SweepSeries.
End of explanation
"""
spending_array = linspace(0, 1200, 20)
infected_sweep = sweep_hand_washing(spending_array)
"""
Explanation: Here's how we run it.
End of explanation
"""
plot(infected_sweep)
decorate(xlabel='Hand-washing campaign spending (USD)',
ylabel='Total fraction infected',
title='Effect of hand washing on total infections',
legend=False)
savefig('figs/chap12-fig03.pdf')
"""
Explanation: And here's what it looks like.
End of explanation
"""
num_students = 90
budget = 1200
price_per_dose = 100
max_doses = int(budget / price_per_dose)
dose_array = linrange(max_doses, endpoint=True)
max_doses
"""
Explanation: Now let's put it all together to make some public health spending decisions.
Optimization
Suppose we have \$1200 to spend on any combination of vaccines and a hand-washing campaign.
End of explanation
"""
for doses in dose_array:
fraction = doses / num_students
spending = budget - doses * price_per_dose
system = make_system(beta, gamma)
add_immunization(system, fraction)
add_hand_washing(system, spending)
results = run_simulation(system, update_func)
print(doses, system.init.S, system.beta, calc_total_infected(results))
"""
Explanation: We can sweep through a range of doses from, 0 to max_doses, model the effects of immunization and the hand-washing campaign, and run simulations.
For each scenario, we compute the fraction of students who get sick.
End of explanation
"""
def sweep_doses(dose_array):
    """Split the budget between vaccine doses and hand washing.

    dose_array: range of values for the number of vaccine doses

    returns: SweepSeries mapping doses -> total fraction infected
    """
    infected = SweepSeries()
    # num_students, budget, price_per_dose, beta and gamma are taken
    # from the enclosing notebook scope.
    for doses in dose_array:
        immunized_fraction = doses / num_students
        remaining_budget = budget - doses * price_per_dose

        sim = make_system(beta, gamma)
        add_immunization(sim, immunized_fraction)
        add_hand_washing(sim, remaining_budget)

        frame = run_simulation(sim, update_func)
        infected[doses] = calc_total_infected(frame)
    return infected
"""
Explanation: The following function wraps that loop and stores the results in a Sweep object.
End of explanation
"""
infected_sweep = sweep_doses(dose_array)
"""
Explanation: Now we can compute the number of infected students for each possible allocation of the budget.
End of explanation
"""
plot(infected_sweep)
decorate(xlabel='Doses of vaccine',
ylabel='Total fraction infected',
title='Total infections vs. doses',
legend=False)
savefig('figs/chap12-fig04.pdf')
"""
Explanation: And plot the results.
End of explanation
"""
# Solution goes here
"""
Explanation: Exercises
Exercise: Suppose the price of the vaccine drops to $50 per dose. How does that affect the optimal allocation of the spending?
Exercise: Suppose we have the option to quarantine infected students. For example, a student who feels ill might be moved to an infirmary, or a private dorm room, until they are no longer infectious.
How might you incorporate the effect of quarantine in the SIR model?
End of explanation
"""
|
DJCordhose/ai | notebooks/rl/berater-v11-lower.ipynb | mit | !pip install git+https://github.com/openai/baselines >/dev/null
!pip install gym >/dev/null
"""
Explanation: <a href="https://colab.research.google.com/github/DJCordhose/ai/blob/master/notebooks/rl/berater-v11-lower.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Berater Environment v11 lower
less stops per consultant
Installation (required for colab)
End of explanation
"""
import numpy as np
import random
import gym
from gym.utils import seeding
from gym import spaces
def state_name_to_int(state):
    """Map a location name to its integer index.

    'S' is 0; the customer locations 'A'..'H', 'K'..'O' follow in map
    order (the letters 'I' and 'J' are not used as location names).
    Raises KeyError for an unknown name.
    """
    names = 'SABCDEFGHKLMNO'
    return {name: index for index, name in enumerate(names)}[state]
def int_to_state_name(state_as_int):
    """Map an integer index back to its location name.

    Inverse of state_name_to_int; raises KeyError for an index
    outside 0..13.
    """
    names = 'SABCDEFGHKLMNO'
    return dict(enumerate(names))[state_as_int]
class BeraterEnv(gym.Env):
    """
    The Berater Problem

    A consultant starts at the home position 'S', travels the map to
    visit customers (positions carrying a non-zero reward), and must
    finally return to 'S'. Travelling an edge costs money; visiting a
    customer collects its reward once.

    Actions:
    There are 4 discrete deterministic actions, each choosing one direction
    """
    metadata = {'render.modes': ['ansi']}
    # class-level logging switches
    showStep = False
    showDone = True
    # running statistics are printed/reset every this many episodes
    envEpisodeModulo = 100

    def __init__(self):
        # smaller alternative map, kept for reference
        # self.map = {
        #     'S': [('A', 100), ('B', 400), ('C', 200 )],
        #     'A': [('B', 250), ('C', 400), ('S', 100 )],
        #     'B': [('A', 250), ('C', 250), ('S', 400 )],
        #     'C': [('A', 400), ('B', 250), ('S', 200 )]
        # }
        # adjacency list: position -> [(neighbor, travel cost), ...]
        self.map = {
            'S': [('A', 300), ('B', 100), ('C', 200 )],
            'A': [('S', 300), ('B', 100), ('E', 100 ), ('D', 100 )],
            'B': [('S', 100), ('A', 100), ('C', 50 ), ('K', 200 )],
            'C': [('S', 200), ('B', 50), ('M', 100 ), ('L', 200 )],
            'D': [('A', 100), ('F', 50)],
            'E': [('A', 100), ('F', 100), ('H', 100)],
            'F': [('D', 50), ('E', 100), ('G', 200)],
            'G': [('F', 200), ('O', 300)],
            'H': [('E', 100), ('K', 300)],
            'K': [('B', 200), ('H', 300)],
            'L': [('C', 200), ('M', 50)],
            'M': [('C', 100), ('L', 50), ('N', 100)],
            'N': [('M', 100), ('O', 100)],
            'O': [('N', 100), ('G', 300)]
        }
        max_paths = 4
        self.action_space = spaces.Discrete(max_paths)

        positions = len(self.map)
        # observations: position, reward of all 4 local paths, rest reward of all locations
        # non existing path is -1000 and no position change
        # look at what #getObservation returns if you are confused
        low = np.append(np.append([0], np.full(max_paths, -1000)), np.full(positions, 0))
        high = np.append(np.append([positions - 1], np.full(max_paths, 1000)), np.full(positions, 1000))
        self.observation_space = spaces.Box(low=low,
                                            high=high,
                                            dtype=np.float32)
        self.reward_range = (-1, 1)

        # per-episode counters
        self.totalReward = 0
        self.stepCount = 0
        self.isDone = False

        # running statistics across episodes
        self.envReward = 0
        self.envEpisodeCount = 0
        self.envStepCount = 0

        self.reset()
        # total customer reward available; used to scale step rewards.
        # NOTE(review): computed once from the first episode's rewards —
        # modulate_reward assigns the same total every episode, so this
        # stays valid.
        self.optimum = self.calculate_customers_reward()

    def seed(self, seed=None):
        """Standard gym seeding hook."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def iterate_path(self, state, action):
        """Return (destination, cost) for taking `action` from `state`.

        An action index beyond the number of outgoing paths keeps the
        position and charges a 1000 penalty.
        """
        paths = self.map[state]
        if action < len(paths):
            return paths[action]
        else:
            # sorry, no such action, stay where you are and pay a high penalty
            return (state, 1000)

    def step(self, action):
        """Execute one action; returns (observation, reward, done, info).

        reward is (customer reward - travel cost) scaled by the total
        achievable reward, keeping it roughly in [-1, 1].
        """
        destination, cost = self.iterate_path(self.state, action)
        lastState = self.state
        customerReward = self.customer_reward[destination]
        reward = (customerReward - cost) / self.optimum

        self.state = destination
        self.customer_visited(destination)
        # episode ends when back home with every customer visited
        done = destination == 'S' and self.all_customers_visited()

        stateAsInt = state_name_to_int(self.state)
        self.totalReward += reward
        self.stepCount += 1
        self.envReward += reward
        self.envStepCount += 1

        if self.showStep:
            print( "Episode: " + ("%4.0f  " % self.envEpisodeCount) +
                   " Step: " + ("%4.0f  " % self.stepCount) +
                   lastState + ' --' + str(action) + '-> ' + self.state +
                   ' R=' + ("% 2.2f" % reward) + ' totalR=' + ("% 3.2f" % self.totalReward) +
                   ' cost=' + ("%4.0f" % cost) + ' customerR=' + ("%4.0f" % customerReward) + ' optimum=' + ("%4.0f" % self.optimum)
                   )

        if done and not self.isDone:
            self.envEpisodeCount += 1
            if BeraterEnv.showDone:
                episodes = BeraterEnv.envEpisodeModulo
                if (self.envEpisodeCount % BeraterEnv.envEpisodeModulo != 0):
                    episodes = self.envEpisodeCount % BeraterEnv.envEpisodeModulo
                print( "Done: " +
                       ("episodes=%6.0f  " % self.envEpisodeCount) +
                       ("avgSteps=%6.2f  " % (self.envStepCount/episodes)) +
                       ("avgTotalReward=% 3.2f" % (self.envReward/episodes) )
                       )
            # reset the running averages every envEpisodeModulo episodes
            if (self.envEpisodeCount%BeraterEnv.envEpisodeModulo) == 0:
                self.envReward = 0
                self.envStepCount = 0

        self.isDone = done
        observation = self.getObservation(stateAsInt)
        # fixed: 'from' used to report self.state, which had already been
        # advanced to the destination, so 'from' always equalled 'to'
        info = {"from": lastState, "to": destination}

        return observation, reward, done, info

    def getObservation(self, position):
        """Observation vector: [position, 4 local path values, all rest rewards].

        A local path value is (target reward - cost); a non-existing
        path is encoded as -1000.
        """
        result = np.array([ position,
                            self.getPathObservation(position, 0),
                            self.getPathObservation(position, 1),
                            self.getPathObservation(position, 2),
                            self.getPathObservation(position, 3)
                            ],
                          dtype=np.float32)
        all_rest_rewards = list(self.customer_reward.values())
        result = np.append(result, all_rest_rewards)
        return result

    def getPathObservation(self, position, path):
        """Value of taking `path` from the current position.

        `position` is expected to be the integer index of the current
        state; the lookup uses self.state directly (the original
        computed the name from `position` into an unused local).
        """
        paths = self.map[self.state]
        if path < len(paths):
            target, cost = paths[path]
            reward = self.customer_reward[target]
            result = reward - cost
        else:
            result = -1000

        return result

    def customer_visited(self, customer):
        """Collect (zero out) the reward at a customer position."""
        self.customer_reward[customer] = 0

    def all_customers_visited(self):
        """True once no customer reward is left."""
        return self.calculate_customers_reward() == 0

    def calculate_customers_reward(self):
        """Sum of all outstanding customer rewards."""
        # renamed local (was 'sum', shadowing the builtin)
        total = 0
        for value in self.customer_reward.values():
            total += value
        return total

    def modulate_reward(self):
        """Assign this episode's rewards: a random subset of customers pays 1000."""
        number_of_customers = len(self.map) - 1
        number_per_consultant = int(number_of_customers/3)
        self.customer_reward = {
            'S': 0
        }
        for customer_nr in range(1, number_of_customers + 1):
            self.customer_reward[int_to_state_name(customer_nr)] = 0
        # every consultant only visits a few random customers
        samples = random.sample(range(1, number_of_customers + 1), k=number_per_consultant)
        key_list = list(self.customer_reward.keys())
        for sample in samples:
            self.customer_reward[key_list[sample]] = 1000

    def reset(self):
        """Start a new episode at 'S' with freshly sampled rewards."""
        self.totalReward = 0
        self.stepCount = 0
        self.isDone = False

        self.modulate_reward()
        self.state = 'S'
        return self.getObservation(state_name_to_int(self.state))

    def render(self):
        """Print the current reward table (ansi mode)."""
        print(self.customer_reward)
env = BeraterEnv()
print(env.reset())
print(env.customer_reward)
"""
Explanation: Environment
End of explanation
"""
BeraterEnv.showStep = True
BeraterEnv.showDone = True
env = BeraterEnv()
print(env)
observation = env.reset()
print(observation)
for t in range(1000):
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.close()
print(observation)
"""
Explanation: Try out Environment
End of explanation
"""
from copy import deepcopy
import json
class Baseline():
    """Best-first search baseline for the Berater environment.

    Computes the optimal (reward - cost) tour for the environment's
    current episode and can benchmark a trained model's episode rewards
    against that optimum.
    """

    def __init__(self, env, max_reward, verbose=1):
        """
        env: BeraterEnv instance to search / benchmark on
        max_reward: total achievable customer reward, used to scale results
        verbose: > 0 prints search progress
        """
        self.env = env
        self.max_reward = max_reward
        self.verbose = verbose
        self.reset()

    def reset(self):
        """Snapshot the environment's current map and customer rewards."""
        self.map = self.env.map
        self.rewards = self.env.customer_reward.copy()

    def as_string(self, state):
        """Canonical key for the closed set.

        reward/cost does not hurt, but is useless, path obscures same state
        """
        new_state = {
            'rewards': state['rewards'],
            'position': state['position']
        }
        return json.dumps(new_state, sort_keys=True)

    def is_goal(self, state):
        """A goal state is back at 'S' with no customer reward left."""
        if state['position'] != 'S': return False
        for reward in state['rewards'].values():
            if reward != 0: return False
        return True

    def expand(self, state):
        """Generate a successor state for every path out of the position."""
        states = []
        for position, cost in self.map[state['position']]:
            new_state = deepcopy(state)
            new_state['position'] = position
            new_state['rewards'][position] = 0
            reward = state['rewards'][position]
            new_state['reward'] += reward
            new_state['cost'] += cost
            new_state['path'].append(position)
            states.append(new_state)
        return states

    def search(self, root, max_depth = 25):
        """Best-first search (ordered by accumulated cost) from root.

        returns: goal state dict with 'scaled_reward' added, or None when
        max_depth is exceeded (or the frontier empties).
        """
        # renamed locals (were 'closed'/'open'; 'open' shadowed the builtin)
        visited = set()
        frontier = [root]
        while frontier:
            state = frontier.pop(0)
            if self.as_string(state) in visited: continue
            visited.add(self.as_string(state))

            depth = len(state['path'])
            if depth > max_depth:
                if self.verbose > 0:
                    print("Visited:", len(visited))
                    print("Reached max depth, without reaching goal")
                return None
            if self.is_goal(state):
                scaled_reward = (state['reward'] - state['cost']) / self.max_reward
                state['scaled_reward'] = scaled_reward
                if self.verbose > 0:
                    print("Scaled reward:", scaled_reward)
                    print("Perfect path", state['path'])
                return state
            expanded = self.expand(state)
            frontier += expanded
            # make this best first
            frontier.sort(key=lambda state: state['cost'])

    def find_optimum(self):
        """Search from the initial state: at 'S', all rewards outstanding."""
        initial_state = {
            'rewards': self.rewards.copy(),
            'position': 'S',
            'reward': 0,
            'cost': 0,
            'path': ['S']
        }
        return self.search(initial_state)

    def benchmark(self, model, sample_runs=100):
        """Run sample_runs episodes, recording optimal and model rewards.

        returns: (perfect_rewards, model_rewards) lists of scaled rewards
        """
        self.verbose = 0
        BeraterEnv.showStep = False
        BeraterEnv.showDone = False

        perfect_rewards = []
        model_rewards = []
        for run in range(sample_runs):
            observation = self.env.reset()
            self.reset()
            optimum_state = self.find_optimum()
            perfect_rewards.append(optimum_state['scaled_reward'])

            # recurrent state/done buffers for model.step
            # NOTE(review): 2*128 presumably matches the trained model's
            # recurrent state size — confirm if the architecture changes
            state = np.zeros((1, 2*128))
            dones = np.zeros((1))
            for t in range(1000):
                actions, _, state, _ = model.step(observation, S=state, M=dones)
                observation, reward, done, info = self.env.step(actions[0])
                if done:
                    break
            # fixed: read totalReward from self.env instead of relying on a
            # global variable named 'env'
            model_rewards.append(self.env.totalReward)
        return perfect_rewards, model_rewards

    def score(self, model, sample_runs=100):
        """Benchmark and summarize.

        returns: (perfect mean, perfect std, model mean, model std)
        """
        # fixed: forward sample_runs instead of hard-coding 100
        perfect_rewards, model_rewards = self.benchmark(model, sample_runs=sample_runs)
        perfect_score_mean, perfect_score_std = np.array(perfect_rewards).mean(), np.array(perfect_rewards).std()
        test_score_mean, test_score_std = np.array(model_rewards).mean(), np.array(model_rewards).std()
        return perfect_score_mean, perfect_score_std, test_score_mean, test_score_std
"""
Explanation: Baseline
End of explanation
"""
!rm -r logs
!mkdir logs
!mkdir logs/berater
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
"""
Explanation: Train model
Estimation
* total cost when travelling all paths (back and forth): 2500
* all rewards: 6000
* but: rewards are much more sparse while routes stay the same, maybe expect less
* estimate: no illegal moves and between
* half the travel cost: (6000 - 1250) / 6000 = .79
* and full traval cost (6000 - 2500) / 6000 = 0.58
* additionally: the agent only sees very little of the whole scenario
* changes with every episode
* was ok when network can learn fixed scenario
End of explanation
"""
# copied from https://github.com/openai/baselines/blob/master/baselines/a2c/utils.py
def ortho_init(scale=1.0):
    """Return an initializer that produces (scaled) orthogonal weights.

    Supports 2D dense kernels and 4D NHWC conv kernels (lasagne-style
    orthogonal init).
    """
    def _ortho_init(shape, dtype, partition_info=None):
        shape = tuple(shape)
        # Collapse conv kernels to a matrix; dense kernels are used as-is.
        if len(shape) == 2:
            flat_shape = shape
        elif len(shape) == 4:  # assumes NHWC
            flat_shape = (np.prod(shape[:-1]), shape[-1])
        else:
            raise NotImplementedError
        gaussian = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # u and v are both orthonormal; pick the one with the correct shape
        basis = u if u.shape == flat_shape else v
        basis = basis.reshape(shape)
        return (scale * basis[:shape[0], :shape[1]]).astype(np.float32)
    return _ortho_init
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
    """Fully connected layer (TF1-style, copied from openai/baselines).

    x: 2D input tensor (batch, features)
    scope: variable scope name holding the layer's weights
    nh: number of output units
    init_scale: scale for the orthogonal weight initializer
    init_bias: constant used to initialize the bias

    returns: output tensor of shape (batch, nh)
    """
    with tf.variable_scope(scope):
        # infer the input width from the tensor's static shape
        nin = x.get_shape()[1].value
        w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
        return tf.matmul(x, w)+b
# copied from https://github.com/openai/baselines/blob/master/baselines/common/models.py#L31
def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False):
    """
    Stack of fully-connected layers to be used in a policy / q-function approximator

    NOTE: this is the unmodified baselines version, kept for reference;
    it is redefined twice below and the last definition is the one used
    for training.

    Parameters:
    ----------
    num_layers: int number of fully-connected layers (default: 2)
    num_hidden: int size of fully-connected layers (default: 64)
    activation: activation function (default: tf.tanh)
    Returns:
    -------
    function that builds fully connected network with a given input tensor / placeholder
    """
    def network_fn(X):
        # print('network_fn called')
        # Tensor("ppo2_model_4/Ob:0", shape=(1, 19), dtype=float32)
        # Tensor("ppo2_model_4/Ob_1:0", shape=(512, 19), dtype=float32)
        # print (X)
        h = tf.layers.flatten(X)
        for i in range(num_layers):
            h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
            if layer_norm:
                h = tf.contrib.layers.layer_norm(h, center=True, scale=True)
            h = activation(h)
        # Tensor("ppo2_model_4/pi/Tanh_2:0", shape=(1, 500), dtype=float32)
        # Tensor("ppo2_model_4/pi_2/Tanh_2:0", shape=(512, 500), dtype=float32)
        # print(h)
        return h
    return network_fn
"""
Explanation: Step 1: Extract MLP builder from openai sources
End of explanation
"""
# first the dense layer
def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False):
    """Same MLP, rebuilt with tf.layers.dense instead of the fc() helper.

    NOTE: redefined once more below; the final definition is the one
    actually used for training.
    """
    def network_fn(X):
        h = tf.layers.flatten(X)
        for i in range(num_layers):
            # replaces: fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
            h = tf.layers.dense(h, units=num_hidden, kernel_initializer=ortho_init(np.sqrt(2)))
            # h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
            if layer_norm:
                h = tf.contrib.layers.layer_norm(h, center=True, scale=True)
            h = activation(h)
        return h
    return network_fn
# then initializer, relu activations
def mlp(num_layers=2, num_hidden=64, activation=tf.nn.relu, layer_norm=False):
    """Final mlp() variant: relu default activation and a seeded
    Glorot-uniform kernel initializer for reproducible results."""
    def network_fn(X):
        features = tf.layers.flatten(X)
        for _ in range(num_layers):
            features = tf.layers.dense(
                features, units=num_hidden,
                kernel_initializer=tf.initializers.glorot_uniform(seed=13))
            if layer_norm:
                # NOTE: standard batch normalization did not train here,
                # so layer normalization is kept instead.
                features = tf.contrib.layers.layer_norm(features, center=True, scale=True)
            features = activation(features)
        return features
    return network_fn
%%time
# Notebook cell: build the vectorized, monitored BeraterEnv for PPO2 training.
# Pattern follows the baselines pong example:
# https://github.com/openai/baselines/blob/master/baselines/deepq/experiments/train_pong.py
# log_dir = logger.get_dir()
log_dir = '/content/logs/berater/'
import gym
from baselines import bench
from baselines import logger
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.vec_monitor import VecMonitor
from baselines.ppo2 import ppo2
# Silence per-step / per-episode console output while training.
BeraterEnv.showStep = False
BeraterEnv.showDone = False
env = BeraterEnv()
# ppo2 expects a vectorized env; VecMonitor writes episode stats to log_dir.
wrapped_env = DummyVecEnv([lambda: BeraterEnv()])
monitored_env = VecMonitor(wrapped_env, log_dir)
# https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py
# https://github.com/openai/baselines/blob/master/baselines/common/models.py#L30
# https://arxiv.org/abs/1607.06450 for layer_norm
# lr linear from lr=1e-2 to lr=1e-4 (default lr=3e-4)
def lr_range(frac):
    """Linear learning-rate schedule for ppo2.learn.

    `frac` is the fraction of training remaining (1.0 at the start, 0.0 at
    the end), so the rate decays linearly from 1e-2 down to 1e-4.
    """
    lr_max, lr_min = 1e-2, 1e-4
    return lr_min + (lr_max - lr_min) * frac
# 3 hidden layers of 500 units with layer normalization (see mlp above).
network = mlp(num_hidden=500, num_layers=3, layer_norm=True)
model = ppo2.learn(
    env=monitored_env,
    network=network,
    lr=lr_range,            # linearly decaying learning rate (see lr_range)
    gamma=1.0,              # undiscounted returns for this episodic task
    ent_coef=0.05,          # entropy bonus to keep exploration up
    total_timesteps=1000000)
# Equivalent call passing the network by name instead of as a callable:
# model = ppo2.learn(
#     env=monitored_env,
#     network='mlp',
#     num_hidden=500,
#     num_layers=3,
#     layer_norm=True,
#     lr=lr_range,
#     gamma=1.0,
#     ent_coef=0.05,
#     total_timesteps=500000)
# model.save('berater-ppo-v11.pkl')
monitored_env.close()
"""
Explanation: Step 2: Replace exotic parts
Steps:
1. Low level matmul replaced with dense layer (no need for custom code here)
* https://www.tensorflow.org/api_docs/python/tf/layers
* https://www.tensorflow.org/api_docs/python/tf/layers/Dense
initializer changed to best practice glorot uniform, but does not give reliable results, so use seed
use relu activations (should train faster)
standard batch normalization does not train with any configuration (no idea why), so we need to keep layer normalization
1.Dropout and L2 would be nice as well, but easy to do within the boundaries of the OpenAI framework: https://stackoverflow.com/questions/38292760/tensorflow-introducing-both-l2-regularization-and-dropout-into-the-network-do
Alternative: Using Keras API
Not done here, as no big benefit expected and would need to be integrated into surrounding low level tensorflow model. Need to reuse session. If you want to do this, be sure to check at least the first link
using Keras within TensorFlow model: https://blog.keras.io/keras-as-a-simplified-interface-to-tensorflow-tutorial.html
https://stackoverflow.com/questions/46790506/calling-a-keras-model-on-a-tensorflow-tensor-but-keep-weights
https://www.tensorflow.org/api_docs/python/tf/get_default_session
https://www.tensorflow.org/api_docs/python/tf/keras/backend/set_session
End of explanation
"""
# !ls -l $log_dir
from baselines.common import plot_util as pu
results = pu.load_results(log_dir)
import matplotlib.pyplot as plt
import numpy as np
r = results[0]
plt.ylim(0, .75)
# plt.plot(np.cumsum(r.monitor.l), r.monitor.r)
plt.plot(np.cumsum(r.monitor.l), pu.smooth(r.monitor.r, radius=100))
"""
Explanation: Visualizing Results
https://github.com/openai/baselines/blob/master/docs/viz/viz.ipynb
End of explanation
"""
import numpy as np
observation = env.reset()
env.render()
baseline = Baseline(env, max_reward=4000)
state = np.zeros((1, 2*128))
dones = np.zeros((1))
BeraterEnv.showStep = True
BeraterEnv.showDone = False
for t in range(1000):
actions, _, state, _ = model.step(observation, S=state, M=dones)
observation, reward, done, info = env.step(actions[0])
if done:
print("Episode finished after {} timesteps, reward={}".format(t+1, env.totalReward))
break
env.close()
%time baseline.find_optimum()
"""
Explanation: Enjoy model
End of explanation
"""
baseline = Baseline(env, max_reward=4000)
perfect_score_mean, perfect_score_std, test_score_mean, test_score_std = baseline.score(model, sample_runs=100)
# perfect scores
perfect_score_mean, perfect_score_std
# test scores for our model
test_score_mean, test_score_std
"""
Explanation: Evaluation
End of explanation
"""
|
Linlinzhao/linlinzhao.github.io | _drafts/.ipynb_checkpoints/understanding backward() in Pytorch-checkpoint.ipynb | mit | import torch as T
import torch.autograd
from torch.autograd import Variable
import numpy as np
"""
Explanation: Having heard about the announcement about Theano from Bengio lab , as a Theano user, I am happy and sad to see the fading of the old hero, caused by many raising stars. Sad to see it is too old to compete with its industrial competitors, and happy to have so many excellent deep learning frameworks to choose from. Recently I started translating some of my old codes to Pytorch and have been really impressed by its dynamic nature and clearness. But at the very beginning, I was very confused by the backward() function when reading the tutorials and documentations. This motivated me to write this post in order for other Pytorch beginners to ease the understanding a bit. And I'll assume that you already know the autograd module and what a Variable is, but are a little confused by definition of backward().
First let's recall the gradient computing under mathematical notions. For an independent variable $x$ (scalar or vector), the whatever operation on $x$ is $y = f(x)$. Then the gradient of $y$ w.r.t $x_i$s is
$$\begin{align}\nabla y&=\begin{bmatrix}
\frac{\partial y}{\partial x_1}\\
\frac{\partial y}{\partial x_2}\\
\vdots
\end{bmatrix}
\end{align}.
$$
Then for a specific point of $x=[X_1, X_2, \dots]$, we'll get the gradient of $y$ on that point as a vector. With these notions in my mind, those things are a bit confusing at the beginning
Mathematically, we would say "the gradients of a function w.r.t. the independent variables", whereas the .grad is attached to the leaf Variables. In Theano and Tensorflow, the computed gradients are stored separately in a variable. But with a moment of adjustment, it is fairly easy to get used to. In Pytorch it is also possible to get the .grad for intermediate Variables with the help of the register_hook function.
The parameter grad_variables of the function torch.autograd.backward(variables, grad_variables=None, retain_graph=None, create_graph=None, retain_variables=None) is not straightforward for knowing its functionality.
What is retain_graph doing?
End of explanation
"""
'''
Define a scalar variable, set requires_grad to be true to add it to backward path for computing gradients
It is actually very simple to use backward()
first define the computation graph, then call backward()
'''
x = Variable(T.randn(1, 1), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
print('y', y)
#define one more operation to check the chain rule
z = y ** 3
print('z', z)
"""
Explanation: Simplicity of using backward()
End of explanation
"""
#yes, it is just as simple as this to compute gradients:
z.backward()
print('z gradient', z.grad)
print('y gradient', y.grad)
print('x gradient', x.grad) # note that x.grad is also a Variable
"""
Explanation: The simple operations defined a forward path $z=(2x)^3$, $z$ will be the final output Variable we would like to compute gradient: $dz=24x^2dx$, which will be passed to the parameter Variables in backward() function.
End of explanation
"""
x = Variable(T.randn(1, 1), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
#define one more operation to check the chain rule
z = y ** 3
z.backward(T.FloatTensor([1]), retain_graph=True)
print('Keeping the default value of grad_variables gives')
print('z gradient', z.grad)
print('y gradient', y.grad)
print('x gradient', x.grad)
x.grad.data.zero_()
z.backward(T.FloatTensor([0.1]), retain_graph=True)
print('Modifying the default value of grad_variables to 0.1 gives')
print('z gradient', z.grad)
print('y gradient', y.grad)
print('x gradient', x.grad)
"""
Explanation: The gradients of both $y$ and $z$ are None, since the function returns the gradient for the leaves, which is $x$ in this case. At the very beginning, I was assuming something like this:
x gradient None
y gradient None
z gradient Variable containing:
128.3257
[torch.FloatTensor of size 1x1],
since the gradient is for the final output $z$.
With a blink of thinking, we could figure out it would be practically chaos if $x$ is a multi-dimensional vector. x.grad should be interpreted as the gradient of $z$ at $x$.
How do we use grad_variables?
grad_variables should be a list of torch tensors. In default case, the backward() is applied to scalar-valued function, the default value of grad_variables is thus torch.FloatTensor([1]). But why is that? What if we put some other values to it?
End of explanation
"""
'''
Try to set x to be column vector or row vector! You'll see different behaviors.
'''
x = Variable(T.randn(2, 2), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
#define one more operation to check the chain rule
z = y ** 3
print('z shape:', z.size())
z.backward(T.FloatTensor([1, 0]), retain_graph=True)
print('x gradient', x.grad)
x.grad.data.zero_() #the gradient for x will be accumulated, it needs to be cleared.
z.backward(T.FloatTensor([0, 1]), retain_graph=True)
print('x gradient', x.grad)
x.grad.data.zero_()
z.backward(T.FloatTensor([1, 1]), retain_graph=True)
print('x gradient', x.grad)
"""
Explanation: Now let's set $x$ to be a matrix. Note that $z$ will also be a matrix.
End of explanation
"""
x = Variable(T.randn(2, 2), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
#print('y', y)
#define one more operation to check the chain rule
z = y ** 3
out = z.mean()
print('out', out)
out.backward(T.FloatTensor([1]), retain_graph=True)
print('x gradient', x.grad)
x.grad.data.zero_()
out.backward(T.FloatTensor([0.1]), retain_graph=True)
print('x gradient', x.grad)
"""
Explanation: We can clearly see the gradients of $z$ are computed w.r.t. each dimension of $x$, because the operations are all element-wise. T.FloatTensor([1, 0]) will give the gradients for the first column of $x$.
Then what if we render the output one-dimensional (scalar) while $x$ is two-dimensional. This is a real simplified scenario of neural networks.
$$f(x)=\frac{1}{n}\sum_i^n(2x_i)^3$$
$$f'(x)=\frac{1}{n}\sum_i^n24x_i^2$$
End of explanation
"""
x = Variable(T.randn(2, 2), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
#print('y', y)
#define one more operation to check the chain rule
z = y ** 3
out = z.mean()
print('out', out)
out.backward(T.FloatTensor([1])) #without setting retain_graph to be true, this gives an error.
print('x gradient', x.grad)
x.grad.data.zero_()
out.backward(T.FloatTensor([0.1]))
print('x gradient', x.grad)
"""
Explanation: What is retain_graph doing?
When training a model, the graph will be re-generated for each iteration. Therefore each iteration will consume the graph if the retain_graph is false, in order to keep the graph, we need to set it be true.
End of explanation
"""
|
tensorflow/docs-l10n | site/ko/r1/tutorials/eager/custom_layers.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import tensorflow.compat.v1 as tf
"""
Explanation: 사용자 정의 층
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/r1/tutorials/eager/custom_layers.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/r1/tutorials/eager/custom_layers.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a>
</td>
</table>
Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
불구하고 공식 영문 문서의 내용과 일치하지 않을 수 있습니다.
이 번역에 개선할 부분이 있다면
tensorflow/docs 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
문서 번역이나 리뷰에 참여하려면
docs-ko@tensorflow.org로
메일을 보내주시기 바랍니다.
신경망을 구축하기 위해서 고수준 API인 tf.keras를 사용하길 권합니다. 대부분의 텐서플로 API는 즉시 실행(eager execution)과 함께 사용할 수 있습니다.
End of explanation
"""
# tf.keras.layers 패키지에서 층은 객체입니다. 층을 구성하려면 간단히 객체를 생성하십시오.
# 대부분의 layer는 첫번째 인수로 출력 차원(크기) 또는 채널을 취합니다.
layer = tf.keras.layers.Dense(100)
# 입력 차원의 수는 층을 처음 실행할 때 유추할 수 있기 때문에 종종 불필요합니다.
# 일부 복잡한 모델에서는 수동으로 입력 차원의 수를 제공하는것이 유용할 수 있습니다.
layer = tf.keras.layers.Dense(10, input_shape=(None, 5))
"""
Explanation: 층: 유용한 연산자 집합
머신러닝을 위한 코드를 작성하는 대부분의 경우에 개별적인 연산과 변수를 조작하는 것보다는 높은 수준의 추상화에서 작업할 것입니다.
많은 머신러닝 모델은 비교적 단순한 층(layer)을 조합하고 쌓아서 표현가능합니다. 또한 텐서플로는 여러 표준형 층을 제공하므로 사용자 고유의 응용 프로그램에 관련된 층을 처음부터 작성하거나, 기존 층의 조합으로 쉽게 만들 수 있습니다.
텐서플로는 전체 케라스 API를 tf.keras 패키지에 포함하고 있습니다. 케라스 층은 모델을 구축하는데 매우 유용합니다.
End of explanation
"""
# 층을 사용하려면, 간단하게 호출합니다.
layer(tf.zeros([10, 5]))
# layer는 유용한 메서드를 많이 가지고 있습니다. 예를 들어, `layer.variables`를 사용하여 층안에 있는 모든 변수를 확인할 수 있으며,
# `layer.trainable_variables`를 사용하여 훈련가능한 변수를 확인할 수 있습니다.
# 완전 연결(fully-connected)층은 가중치(weight)와 편향(biases)을 위한 변수를 가집니다.
layer.variables
# 또한 변수는 객체의 속성을 통해 편리하게 접근가능합니다.
layer.kernel, layer.bias
"""
Explanation: 미리 구성되어있는 층은 다음 문서에서 확인할 수 있습니다. Dense(완전 연결 층), Conv2D, LSTM, BatchNormalization, Dropout, 등을 포함하고 있습니다.
End of explanation
"""
class MyDenseLayer(tf.keras.layers.Layer):
    """Minimal custom dense layer without bias: output = input @ kernel."""

    def __init__(self, num_outputs):
        super(MyDenseLayer, self).__init__()
        self.num_outputs = num_outputs

    def build(self, input_shape):
        # Deferred variable creation: the input width is only known here.
        in_features = int(input_shape[-1])
        self.kernel = self.add_variable("kernel",
                                        shape=[in_features, self.num_outputs])

    def call(self, input):
        return tf.matmul(input, self.kernel)
layer = MyDenseLayer(10)
print(layer(tf.zeros([10, 5])))
print(layer.trainable_variables)
"""
Explanation: 사용자 정의 층 구현
사용자 정의 층을 구현하는 가장 좋은 방법은 tf.keras.Layer 클래스를 상속하고 다음과 같이 구현하는 것입니다.
* __init__ 에서 층에 필요한 매개변수를 입력 받습니다..
* build, 입력 텐서의 크기를 알고 나머지를 초기화 할 수 있습니다.
* call, 정방향 연산(forward computation)을 진행 할 수 있습니다.
변수를 생성하기 위해 build가 호출되길 기다릴 필요가 없다는 것에 주목하세요. 또한 변수를 __init__에 생성할 수도 있습니다. 그러나 build에 변수를 생성하는 유리한 점은 층이 작동할 입력의 크기를 기준으로 나중에 변수를 만들 수 있다는 것입니다. 반면에, __init__에 변수를 생성하는 것은 변수 생성에 필요한 크기가 명시적으로 지정되어야 함을 의미합니다.
End of explanation
"""
class ResnetIdentityBlock(tf.keras.Model):
    """ResNet identity block: three Conv2D+BatchNorm stages plus a skip connection.

    The raw input is added back to the third stage's output before the final
    relu, so filters[2] must match the input channel count for the addition
    to be valid.
    """
    def __init__(self, kernel_size, filters):
        super(ResnetIdentityBlock, self).__init__(name='')
        filters1, filters2, filters3 = filters
        # Stage a: 1x1 convolution.
        self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
        self.bn2a = tf.keras.layers.BatchNormalization()
        # Stage b: kernel_size convolution; 'same' padding keeps spatial dims.
        self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
        self.bn2b = tf.keras.layers.BatchNormalization()
        # Stage c: 1x1 convolution back out to filters3 channels.
        self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
        self.bn2c = tf.keras.layers.BatchNormalization()
    def call(self, input_tensor, training=False):
        """Forward pass; `training` toggles batch-norm train/inference mode."""
        x = self.conv2a(input_tensor)
        x = self.bn2a(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2b(x)
        x = self.bn2b(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2c(x)
        x = self.bn2c(x, training=training)
        # Identity shortcut: add the block input, then the final nonlinearity.
        x += input_tensor
        return tf.nn.relu(x)
block = ResnetIdentityBlock(1, [1, 2, 3])
print(block(tf.zeros([1, 2, 3, 3])))
print([x.name for x in block.trainable_variables])
"""
Explanation: 다른 독자가 표준형 층의 동작을 잘 알고 있기 때문에, 가능한 경우 표준형 층을 사용하는것이 전체 코드를 읽고 유지하는데 더 쉽습니다. 만약 tf.keras.layers 또는 tf.contrib.layers에 없는 층을 사용하기 원하면 깃허브에 이슈화하거나, 풀 리퀘스트(pull request)를 보내세요.
모델: 층 구성
머신러닝 모델에서 대부분의 재미있는 많은 것들은 기존의 층을 조합하여 구현됩니다. 예를 들어, 레스넷(resnet)의 각 잔여 블록(residual block)은 합성곱(convolution), 배치 정규화(batch normalization), 쇼트컷(shortcut) 등으로 구성되어 있습니다.
다른층을 포함한 모델을 만들기 위해 사용하는 메인 클래스는 tf.keras.Model입니다. 다음은 tf.keras.Model을 상속(inheritance)하여 구현한 코드입니다.
End of explanation
"""
my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(2, 1,
padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(3, (1, 1)),
tf.keras.layers.BatchNormalization()])
my_seq(tf.zeros([1, 2, 3, 3]))
"""
Explanation: 그러나 대부분의 경우에, 많은 층으로 구성된 모델은 간단하게 연이어 하나의 층으로 호출할 수 있습니다. 이는 tf.keras.Sequential 사용하여 간단한 코드로 구현 가능합니다.
End of explanation
"""
|
phobson/pygridtools | docs/tutorial/04_InteractiveWidgets.ipynb | bsd-3-clause | from IPython.display import Audio,Image, YouTubeVideo
YouTubeVideo('S5SG9km2f_A', height=450, width=900)
"""
Explanation: Grid Generation with Interactive Widgets
This notebook demostrates how to use the interative widgets.
See a version of it in action:
End of explanation
"""
%matplotlib inline
import warnings
warnings.simplefilter('ignore')
import numpy as np
import matplotlib.pyplot as plt
import pandas
import geopandas
from pygridgen import Gridgen
from pygridtools import viz, iotools
def plotter(x, y, **kwargs):
    """Draw the boundary `domain` and overlay the grid cells given by (x, y).

    A 'figsize' entry in kwargs sizes the figure (default (9, 9)); every
    remaining keyword argument is forwarded to viz.plot_cells.
    """
    size = kwargs.pop('figsize', (9, 9))
    figure, axes = plt.subplots(figsize=size)
    axes.set_aspect('equal')
    viz.plot_domain(domain, betacol='beta', ax=axes)
    axes.set_xlim([0, 25])
    axes.set_ylim([0, 25])
    return viz.plot_cells(x, y, ax=axes, **kwargs)
"""
Explanation: Main Tutorial
End of explanation
"""
domain = geopandas.read_file('basic_data/domain.geojson')
fig, ax = plt.subplots(figsize=(9, 9), subplot_kw={'aspect':'equal'})
fig = viz.plot_domain(domain, betacol='beta', ax=ax)
"""
Explanation: Loading and plotting the boundary data
End of explanation
"""
grid = Gridgen(domain.geometry.x, domain.geometry.y,
domain.beta, shape=(50, 50), ul_idx=2)
fig_orig, artists = plotter(grid.x, grid.y)
"""
Explanation: Generating a grid with pygridgen, plotting with pygridtools
End of explanation
"""
focus, focuser_widget = iotools.interactive_grid_focus(grid, n_points=3, plotfxn=plotter)
focuser_widget
"""
Explanation: Interactively manipulate the Focus
End of explanation
"""
reshaped, shaper_widget = iotools.interactive_grid_shape(grid, max_n=100, plotfxn=plotter)
shaper_widget
fig_orig
"""
Explanation: Interactively change the number of nodes in the grid
(Notice how the focus stay where we want)
End of explanation
"""
import json
from pathlib import Path
from tempfile import TemporaryDirectory
with TemporaryDirectory() as td:
f = Path(td, 'widget_grid.json')
with f.open('w') as grid_write:
json.dump(grid.to_spec(), grid_write)
with f.open('r') as grid_read:
spec = json.load(grid_read)
new_grid = Gridgen.from_spec(spec)
plotter(new_grid.x, new_grid.y)
"""
Explanation: Save, load, and recreate the altered grid without widgets
End of explanation
"""
|
tkas/osmose-backend | doc/3_0-SQL-minimal.ipynb | gpl-3.0 | sql10 = """
SELECT
nodes.id,
ST_AsText(nodes.geom) AS geom
FROM
nodes
JOIN ways ON
ways.tags != ''::hstore AND
ways.tags?'building' AND ways.tags->'building' != 'no' AND
ways.is_polygon AND
ST_Intersects(ST_MakePolygon(ways.linestring), nodes.geom)
WHERE
nodes.tags != ''::hstore AND
nodes.tags?'amenity' AND nodes.tags->'amenity' = 'pharmacy'
"""
"""
Explanation: Minimal analyzer based on SQL query
The kind of analyzer based on SQL query is relevant when want to check geometry or relation between multiple objects. The result of the query is used to fill the Osmose issue report.
For explanation purpose only, we just here make an analyzer that report pharmacy as node inside a building polygon, it is not looking for issue in the data.
sql
SELECT
-- We report pharmacy nodes osm id and location
nodes.id,
ST_AsText(nodes.geom) AS geom
FROM
nodes
JOIN ways ON
-- Use the index on tags
ways.tags != ''::hstore AND
-- Look for ways with valid building tag
ways.tags?'building' AND ways.tags->'building' != 'no' AND
-- Look for way as valid polygon
ways.is_polygon AND
-- Use the spatial index for ways bbox crossing the node location
ways.linestring && nodes.geom AND
-- Ensure the node is inside the polygon
-- (ST_Intersects call it self the spatial crossing with bbox, so in this case it not necessary)
ST_Intersects(ST_MakePolygon(ways.linestring), nodes.geom)
WHERE
-- Use the index on tags
nodes.tags != ''::hstore AND
-- Look for node with tag amenity=pharmacy
nodes.tags?'amenity' AND nodes.tags->'amenity' = 'pharmacy'
End of explanation
"""
%cd "/opt/osmose-backend/"
from modules.OsmoseTranslation import T_
from analysers.Analyser_Osmosis import Analyser_Osmosis
class Analyser_Pharmacy_Building(Analyser_Osmosis):
def __init__(self, config, logger = None):
super().__init__(config, logger)
# Define Osmose issue class id 1
self.classs[1] = self.def_class(
item = 2010,
level = 1,
tags = ['building'],
title = T_('Pharmacy node in Building')
)
def analyser_osmosis_common(self):
# Run the SQL query
self.run(sql10, lambda res: {
# For each result, create an osmose issue of class 1
'class': 1,
# Explain how to interpret the returned fields from query
'data': [self.node_full, self.positionAsText]
})
"""
Explanation: We have to create an inherited class from Analyser_Osmosis. The __init__() setup the meta information of produced issues. It defines a class id for Osmose issues.
analyser_osmosis_common() run the query and build the Osmose issues. For each row returned by the query, an Osmose issue is created using the lambda function. It should at least return:
* class refer to the class id definition,
* data: must match the result row definition from the query.
End of explanation
"""
import osmose_config as config
country_conf = config.config['monaco']
country_conf.init()
country_conf.analyser_options
from modules.jupyter import *
csv = run(country_conf, Analyser_Pharmacy_Building, format = 'csv')
print_csv(csv)
geojson = run(country_conf, Analyser_Pharmacy_Building, format = 'geojson')
print_geojson(geojson, limit = 100)
"""
Explanation: To run the analyze we need a context of execution. Each country or area have a entry in the file osmose_config.py.
End of explanation
"""
|
bollwyvl/ipytangle | notebooks/examples/Tangling up interact.ipynb | bsd-3-clause | from IPython.html.widgets import interact
from math import (sin, cos, tan)
from ipytangle import tangle
"""
Explanation: Tangling up interact
IPython's interact can do some things that are awkward with straight widgets, such as generating plots. It will magically make built-in widgets from some simple settings objects.
End of explanation
"""
# interact renders a dropdown from the dict (sin/cos/tan) and an integer
# slider from the (0, 360) range, re-running the body on every widget change.
@interact
def interactor(fn=dict(sin=sin, cos=cos, tan=tan), x=(0, 360)):
    # NOTE(review): math trig functions take radians while the 0-360 slider
    # suggests degrees — presumably intentional for the demo; confirm.
    print(fn(x))
"""
Explanation: If you have defined an interact function, you can pull out all of the variables and put them in a tangle.
<div class="alert alert-info">
Because you can only have one tangle per page right now, see [Interacting with a tangle](./Interacting with a tangle.ipynb).
</div>
End of explanation
"""
trig_talk = tangle(interactor)
trig_talk
"""
Explanation: The fn_label function
The fn_label of x is... some number.
End of explanation
"""
|
mkuron/espresso | doc/tutorials/08-visualization/08-visualization.ipynb | gpl-3.0 | from matplotlib import pyplot
import espressomd
import numpy
espressomd.assert_features("LENNARD_JONES")
# system parameters (10000 particles)
box_l = 10.7437
density = 0.7
# interaction parameters (repulsive Lennard-Jones)
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# integration parameters
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.time_step = 0.0001
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 100
#############################################################
# Setup System #
#############################################################
# interaction setup
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
# particle setup
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
system.analysis.dist_to(0)
act_min_dist = system.analysis.min_dist()
#############################################################
# Warmup Integration #
#############################################################
# set LJ cap
lj_cap = 20
system.force_cap = lj_cap
# warmup integration loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
system.integrator.run(warm_steps)
# warmup criterion
act_min_dist = system.analysis.min_dist()
i += 1
# increase LJ cap
lj_cap = lj_cap + 10
system.force_cap = lj_cap
#############################################################
# Integration #
#############################################################
# remove force capping
lj_cap = 0
system.force_cap = lj_cap
def main():
    """Run the production integration, printing a progress line per block."""
    for step in range(int_n_times):
        print(f"\rrun {step} at time={system.time:.0f} ", end='')
        system.integrator.run(int_steps)
    print('\rSimulation complete')
main()
"""
Explanation: Tutorial 8: Visualization
Introduction
When you are running a simulation, it is often useful to see what is going on
by visualizing particles in a 3D view or by plotting observables over time.
That way, you can easily determine things like whether your choice of parameters
has led to a stable simulation or whether your system has equilibrated. You may
even be able to do your complete data analysis in real time as the simulation progresses.
Thanks to ESPResSo's Python interface, we can make use of standard libraries
like Mayavi or OpenGL (for interactive 3D views) and Matplotlib (for line graphs)
for this purpose. We will also use NumPy, which both of these libraries depend on,
to store data and perform some basic analysis.
Simulation
First, we need to set up a simulation.
We will simulate a simple Lennard-Jones liquid.
End of explanation
"""
matplotlib_notebook = True # toggle this off when outside IPython/Jupyter
# setup matplotlib canvas
pyplot.xlabel("Time")
pyplot.ylabel("Energy")
plot, = pyplot.plot([0], [0])
if matplotlib_notebook:
from IPython import display
else:
pyplot.show(block=False)
# setup matplotlib update function
current_time = -1
def update_plot():
    """Redraw the energy-vs-time line from the global `energies` array.

    Reads the module-level `current_time` index set by main(); skips the
    first few samples (i < 3) so the axis limits are well defined.
    """
    i = current_time
    if i < 3:
        return None
    # Show only the samples recorded so far (rows 0..i of `energies`).
    plot.set_xdata(energies[:i + 1, 0])
    plot.set_ydata(energies[:i + 1, 1])
    pyplot.xlim(0, energies[i, 0])
    pyplot.ylim(energies[:i + 1, 1].min(), energies[:i + 1, 1].max())
    # refresh matplotlib GUI
    if matplotlib_notebook:
        # In a notebook, replace the displayed figure in place.
        display.clear_output(wait=True)
        display.display(pyplot.gcf())
    else:
        # With a GUI backend, redraw and yield briefly to the event loop.
        pyplot.draw()
        pyplot.pause(0.01)
# re-define the main() function
def main():
    """Integration loop that records total energy and live-updates the plot."""
    global current_time
    for i in range(int_n_times):
        system.integrator.run(int_steps)
        # Store (simulation time, total energy) for this sampling point.
        energies[i] = (system.time, system.analysis.energy()['total'])
        current_time = i
        update_plot()
    if matplotlib_notebook:
        # Drop the duplicate figure left behind by the final display() call.
        display.clear_output(wait=True)
system.time = 0 # reset system timer
energies = numpy.zeros((int_n_times, 2))
main()
if not matplotlib_notebook:
pyplot.close()
"""
Explanation: Live plotting
Let's have a look at the total energy of the simulation. We can determine the
individual energies in the system using <tt>system.analysis.energy()</tt>.
We will adapt the <tt>main()</tt> function to store the total energy at each
integration run into a NumPy array. We will also create a function to draw a
plot after each integration run.
End of explanation
"""
from espressomd import visualization
from threading import Thread
visualizer = visualization.openGLLive(system)
# alternative: visualization.mayaviLive(system)
"""
Explanation: Live visualization and plotting
To interact with a live visualization, we need to move the main integration loop into a secondary thread and run the visualizer in the main thread (note that visualization or plotting cannot be run in secondary threads). First, choose a visualizer:
End of explanation
"""
def main():
    """Integration loop meant to run in a secondary thread.

    Records energies as before but calls visualizer.update() instead of
    plotting directly — plotting must stay on the main thread.
    """
    global current_time
    for i in range(int_n_times):
        system.integrator.run(int_steps)
        energies[i] = (system.time, system.analysis.energy()['total'])
        current_time = i
        visualizer.update()
system.time = 0 # reset system timer
"""
Explanation: Then, re-define the <tt>main()</tt> function to run the visualizer:
End of explanation
"""
# setup new matplotlib canvas
if matplotlib_notebook:
pyplot.xlabel("Time")
pyplot.ylabel("Energy")
plot, = pyplot.plot([0], [0])
# execute main() in a secondary thread
t = Thread(target=main)
t.daemon = True
t.start()
# execute the visualizer in the main thread
visualizer.register_callback(update_plot, interval=int_steps // 2)
visualizer.start()
"""
Explanation: Next, create a secondary thread for the <tt>main()</tt> function. However,
as we now have multiple threads, and the first thread is already used by
the visualizer, we cannot call <tt>update_plot()</tt> from
the <tt>main()</tt> anymore.
The solution is to register the <tt>update_plot()</tt> function as a
callback of the visualizer:
End of explanation
"""
|
FordyceLab/AcqPack | examples/imaging_and_gui.ipynb | mit | # test image stack
arr = []
for i in range(50):
b = np.random.rand(500,500)
b= (b*(2**16-1)).astype('uint16')
arr.append(b)
# snap (MPL)
button = widgets.Button(description='Snap')
display.display(button)
def on_button_clicked(b):
    """Snap handler: pop the next test frame and show it with matplotlib."""
    img=arr.pop()
    plt.imshow(img, cmap='gray')
    # Replace the previous frame in the notebook output area.
    display.clear_output(wait=True)
    display.display(plt.gcf())
button.on_click(on_button_clicked)
# snap (CV2)
button = widgets.Button(description='Snap')
display.display(button)
def on_button_clicked(b):
    """Snap handler: pop the next test frame and show it in an OpenCV window."""
    img=arr.pop()
    cv2.imshow('Video',img)
    # waitKey gives HighGUI time to actually paint the window.
    cv2.waitKey(30)
button.on_click(on_button_clicked)
"""
Explanation: Snap
End of explanation
"""
import numpy as np
import cv2
from IPython import display
# test image stack
a = []
for i in range(50):
b = np.zeros((500,500))
b[i:i+100, i:i+100]=1.0
b=b*255
b=b.astype('uint8')
a.append(b)
# video (MPL) (slow, doesn't work well)
# for img in a:
# plt.imshow(img, cmap='gray')
# display.clear_output(wait=True)
# display.display(plt.gcf())
# video (CV2)
cv2.namedWindow('Video',cv2.WINDOW_NORMAL)
for img in a:
b = cv2.imshow('Video',img)
cv2.resizeWindow('Video', 500,500)
cv2.moveWindow('Video',0,0)
display.clear_output(wait=True)
print np.random.randn(1)
if cv2.waitKey(30) >= 0:
break
cv2.destroyAllWindows()
# video with button (CV2)
button = widgets.Button(description='Live')
display.display(button)
def on_button_clicked(b):
for img in a:
cv2.imshow('Video',img)
cv2.waitKey(30)
display.clear_output(wait=True)
print np.random.randn(1)
button.on_click(on_button_clicked)
"""
Explanation: Video
http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
End of explanation
"""
button = widgets.ToggleButton(description='Live', value=False)
def on_click(change):
display.clear_output(wait=True)
print change['new']
button.observe(on_click, names='value')
display.display(button)
import time
# Two buttons whose click handlers run blocking loops.
# NOTE(review): ctrlloop() is defined but never called in this cell, so
# b1_click/b2_click are never actually registered; also b2_click has no
# sleep, so its loop finishes near-instantly. Looks like scratch code.
b1 = widgets.Button(description='b1')
b2 = widgets.Button(description='b2')
def ctrlloop():
    def b1_click(b):
        for i in range(10):
            print 'b1', i
            time.sleep(0.5)
    def b2_click(b):
        for i in range(10):
            print 'b2', i
    # dl = widgets.jsdlink((button, 'value'), (vid, 'value'))
    b1.on_click(b1_click)
    b2.on_click(b2_click)
widgets.HBox([b1,b2])
# Play widget (an animation timer) linked browser-side to an IntSlider:
# jslink keeps the two 'value' traits in sync without a kernel round-trip.
play = widgets.Play(
    interval=160,
    value=50,
    min=0,
    max=100,
    step=1,
    description="Press play",
    disabled=False
)
slider = widgets.IntSlider()
widgets.jslink((play, 'value'), (slider, 'value'))
widgets.HBox([play, slider])
# Write a one-byte flag file. The context manager guarantees the handle is
# closed even on error; b'1' equals str(1) under Python 2 but, unlike it,
# is also valid for a binary-mode write under Python 3.
with open('temp.msg', 'wb') as f:
    f.write(b'1')
"""
Explanation: GUI and BUTTONS
http://docs.opencv.org/2.4/modules/highgui/doc/user_interface.html
End of explanation
"""
# icons are from "font-awesome"
# Manual stage-control panel: four XY jog buttons, two Z jog buttons, and
# a speed text box + step slider next to each axis cluster.
x_minus = widgets.Button(
    description='',
    disabled=False,
    button_style='',
    icon = 'arrow-left')
x_plus = widgets.Button(
    description='',
    disabled=False,
    button_style='',
    icon = 'arrow-right')
y_minus = widgets.Button(
    description='',
    disabled=False,
    button_style='',
    icon='arrow-up')
y_plus = widgets.Button(
    description='',
    disabled=False,
    button_style='',
    icon = 'arrow-down')
# Speed/step controls and the 2x2 arrow grid for the XY axes.
xy_slider = widgets.VBox([widgets.FloatText(description='speed', width='30%',value=50),widgets.IntSlider(width=100, step=10)])
xy_cluster = widgets.VBox([ widgets.HBox([x_minus,x_plus]), widgets.HBox([y_minus, y_plus]) ])
z_minus = widgets.Button(
    description='',
    disabled=False,
    button_style='',
    icon = 'arrow-up')
z_plus = widgets.Button(
    description='',
    disabled=False,
    button_style='',
    icon = 'arrow-down')
# Same controls for the Z axis, stacked vertically.
z_slider = widgets.VBox([widgets.FloatText(description='speed', width='30%',value=50),widgets.IntSlider(width=100, step=10)])
z_cluster = widgets.VBox([ z_minus, z_plus])
widgets.HBox([xy_cluster, xy_slider, z_cluster, z_slider])
"""
Explanation: Arrows
End of explanation
"""
|
gronnbeck/udacity-deep-learning | language-translation/dlnd_language_translation.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
# Paths to the small parallel English/French corpora shipped with the
# project; each file is loaded as one big newline-separated string.
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
"""
Explanation: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
End of explanation
"""
# Half-open range of sentence indices to display below.
view_sentence_range = (0, 30)

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
# Dict-comprehension dedupes tokens; only its key count is used.
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    """
    Convert source and target text to proper word ids
    :param source_text: String that contains all the source text.
    :param target_text: String that contains all the target text.
    :param source_vocab_to_int: Dictionary to go from the source words to an id
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
    eos_id = target_vocab_to_int['<EOS>']
    # Strip BOTH corpora before splitting. The original stripped only the
    # target, so a trailing newline in the source produced an extra empty
    # sentence and misaligned the source/target pairs.
    source_id_text = [[source_vocab_to_int[word] for word in line.split()]
                      for line in source_text.strip().split('\n')]
    # Append <EOS> to every target sentence so the decoder can learn when
    # a translation ends.
    target_id_text = [[target_vocab_to_int[word] for word in line.split()] + [eos_id]
                      for line in target_text.strip().split('\n')]
    return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
"""
Explanation: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
End of explanation
"""
def model_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate, keep probability)
    """
    # Rank-2 placeholders are [batch, sequence]; the two hyper-parameters
    # (learning rate, dropout keep probability) are scalars.
    input_ph = tf.placeholder(tf.int32, [None, None], name="input")
    target_ph = tf.placeholder(tf.int32, [None, None], name="targets")
    lr_ph = tf.placeholder(tf.float32, [], name="learn_rate")
    keep_prob_ph = tf.placeholder(tf.float32, [], name="keep_prob")
    return input_ph, target_ph, lr_ph, keep_prob_ph
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoding_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Return the placeholders in the following tuple: (Input, Targets, Learning Rate, Keep Probability)
End of explanation
"""
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for decoding
    :param target_data: Target Placeholder
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
    # Bug fix: look up <GO> in the *target* vocabulary that was passed in.
    # The original reached for the global source_vocab_to_int, which only
    # happened to work because both vocabularies share the special tokens.
    go_id = target_vocab_to_int['<GO>']
    # Drop the final token of every sequence...
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # ...and prepend <GO> so the decoder knows where generation starts.
    return tf.concat([tf.fill([batch_size, 1], go_id), trimmed], 1)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_decoding_input(process_decoding_input)
"""
Explanation: Process Decoding Input
Implement process_decoding_input using TensorFlow to remove the last word id from each batch in target_data and concat the GO ID to the beginning of each batch.
End of explanation
"""
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
    """
    Create encoding layer
    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :return: RNN state
    """
    base_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    dropped_cell = tf.contrib.rnn.DropoutWrapper(base_cell, output_keep_prob=keep_prob)
    # TF 1.0 idiom: repeat the same wrapped cell to stack num_layers layers.
    stacked_cell = tf.contrib.rnn.MultiRNNCell([dropped_cell] * num_layers)
    # Only the encoder's final state is needed to seed the decoder; the
    # per-step outputs are discarded.
    _outputs, final_state = tf.nn.dynamic_rnn(stacked_cell, rnn_inputs, dtype=tf.float32)
    return final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
"""
Explanation: Encoding
Implement encoding_layer() to create a Encoder RNN layer using tf.nn.dynamic_rnn().
End of explanation
"""
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
                         output_fn, keep_prob):
    """
    Create a decoding layer for training
    :param encoder_state: Encoder State
    :param dec_cell: Decoder RNN Cell
    :param dec_embed_input: Decoder embedded input
    :param sequence_length: Sequence Length
    :param decoding_scope: TensorFlow Variable Scope for decoding
    :param output_fn: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: Train Logits
    """
    # Dropout on the decoder cell outputs; keep_prob is a placeholder, so
    # callers can feed 1.0 at evaluation time.
    drop = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
    # Legacy TF 1.0 contrib.seq2seq API: the "train" decoder_fn is seeded
    # with encoder_state and consumes the ground-truth embeddings passed
    # via `inputs` (teacher forcing).
    dynamic_fn_train = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
    outputs_train, state_train, output_size = tf.contrib.seq2seq.dynamic_rnn_decoder(drop,
                                                             dynamic_fn_train,
                                                             inputs=dec_embed_input,
                                                             sequence_length=sequence_length,
                                                             scope=decoding_scope)
    # Project the RNN outputs to vocabulary-sized class logits.
    return output_fn(outputs_train)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
"""
Explanation: Decoding - Training
Create training logits using tf.contrib.seq2seq.simple_decoder_fn_train() and tf.contrib.seq2seq.dynamic_rnn_decoder(). Apply the output_fn to the tf.contrib.seq2seq.dynamic_rnn_decoder() outputs.
End of explanation
"""
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
                         maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
    """
    Create a decoding layer for inference
    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN Cell
    :param dec_embeddings: Decoder embeddings
    :param start_of_sequence_id: GO ID
    :param end_of_sequence_id: EOS Id
    :param maximum_length: The maximum allowed time steps to decode
    :param vocab_size: Size of vocabulary
    :param decoding_scope: TensorFlow Variable Scope for decoding
    :param output_fn: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: Inference Logits
    """
    # At inference there is no ground truth: this decoder_fn embeds its own
    # previous prediction each step, starting from <GO> and stopping at
    # <EOS> or after maximum_length steps (no `inputs` argument below).
    infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
        output_fn,
        encoder_state,
        dec_embeddings,
        start_of_sequence_id,
        end_of_sequence_id,
        maximum_length,
        vocab_size)
    # keep_prob is normally fed as 1.0 for inference; the wrapper only
    # matters if a caller deliberately decodes with dropout enabled.
    drop = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
    infer_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
        drop,
        infer_decoder_fn,
        scope=decoding_scope)
    return infer_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
"""
Explanation: Decoding - Inference
Create inference logits using tf.contrib.seq2seq.simple_decoder_fn_inference() and tf.contrib.seq2seq.dynamic_rnn_decoder().
End of explanation
"""
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
                   num_layers, target_vocab_to_int, keep_prob):
    """
    Create decoding layer
    :param dec_embed_input: Decoder embedded input
    :param dec_embeddings: Decoder embeddings
    :param encoder_state: The encoded state
    :param vocab_size: Size of vocabulary
    :param sequence_length: Sequence Length
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param keep_prob: Dropout keep probability
    :return: Tuple of (Training Logits, Inference Logits)
    """
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
    # TF 1.0 idiom: repeating the wrapped cell stacks num_layers layers.
    cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
    # First scope builds the decoder + output-projection variables for the
    # training path...
    with tf.variable_scope("decoding") as decoding_scope:
        output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
        train_logits = decoding_layer_train(encoder_state, cell, dec_embed_input, sequence_length, decoding_scope,
                                            output_fn, keep_prob)
    # ...and reuse=True makes the inference path share those exact weights,
    # so the order of these two blocks must not change.
    with tf.variable_scope('decoding', reuse=True) as decoding_scope:
        infer_logits = decoding_layer_infer(encoder_state, cell, dec_embeddings, target_vocab_to_int['<GO>'],
                                            target_vocab_to_int['<EOS>'], sequence_length,
                                            vocab_size, decoding_scope, output_fn, keep_prob)
    return train_logits, infer_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
"""
Explanation: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Create RNN cell for decoding using rnn_size and num_layers.
Create the output function using a lambda to transform its input, logits, to class logits.
Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
End of explanation
"""
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network
    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param sequence_length: Sequence Length
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training Logits, Inference Logits)
    """
    # Encoder: embed the source word ids and run them through the encoder
    # RNN; only its final state is carried forward.
    embedded_source = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
    encoder_state = encoding_layer(embedded_source, rnn_size, num_layers, keep_prob)
    # Decoder input: drop the last target token and prepend <GO>.
    decoder_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
    # The decoder embedding matrix is a trainable variable shared by the
    # training and inference paths.
    embedding_matrix = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
    embedded_targets = tf.nn.embedding_lookup(embedding_matrix, decoder_input)
    # decoding_layer returns the (train_logits, infer_logits) tuple.
    return decoding_layer(embedded_targets, embedding_matrix, encoder_state, target_vocab_size, sequence_length,
                          rnn_size, num_layers, target_vocab_to_int, keep_prob)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
Apply embedding to the input data for the encoder.
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.
Apply embedding to the target data for the decoder.
Decode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).
End of explanation
"""
# Number of Epochs
epochs = 8
# Batch Size
batch_size = 512
# RNN Size (LSTM units per layer, encoder and decoder)
rnn_size = 250
# Number of stacked LSTM layers
num_layers = 3
# Embedding Size for the encoder and decoder vocabularies
encoding_embedding_size = 300
decoding_embedding_size = 300
# Learning Rate
learning_rate = 0.01
# Dropout Keep Probability (fed during training; 1.0 is fed at inference)
keep_probability = 0.7
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
    input_data, targets, lr, keep_prob = model_inputs()
    # Defaults to the longest source sentence; overridden per batch below.
    sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
    input_shape = tf.shape(input_data)
    # The source batch is reversed along its last (sequence) axis before
    # encoding — a common seq2seq training trick.
    train_logits, inference_logits = seq2seq_model(
        tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
        encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
    # Name the inference output so it can be fetched after reloading.
    tf.identity(inference_logits, 'logits')
    with tf.name_scope("optimization"):
        # Loss function: per-step cross entropy with uniform weights.
        cost = tf.contrib.seq2seq.sequence_loss(
            train_logits,
            targets,
            tf.ones([input_shape[0], sequence_length]))
        # Optimizer
        optimizer = tf.train.AdamOptimizer(lr)
        # Gradient Clipping: clip each gradient element to [-1, 1].
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import time
def get_accuracy(target, logits):
    """
    Calculate accuracy
    """
    # Pad the shorter input along the sequence axis with zeros so the
    # element-wise comparison below is well defined.
    longest = max(target.shape[1], logits.shape[1])
    target_pad = longest - target.shape[1]
    if target_pad:
        target = np.pad(target, [(0, 0), (0, target_pad)], 'constant')
    logits_pad = longest - logits.shape[1]
    if logits_pad:
        logits = np.pad(logits, [(0, 0), (0, logits_pad), (0, 0)], 'constant')
    # Fraction of positions where the arg-max prediction matches the label.
    return np.mean(np.equal(target, np.argmax(logits, 2)))
# Hold out the first batch_size sentence pairs as a fixed validation set;
# train on the rest.
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch_i in range(epochs):
        for batch_i, (source_batch, target_batch) in enumerate(
                helper.batch_data(train_source, train_target, batch_size)):
            start_time = time.time()
            _, loss = sess.run(
                [train_op, cost],
                {input_data: source_batch,
                 targets: target_batch,
                 lr: learning_rate,
                 sequence_length: target_batch.shape[1],
                 keep_prob: keep_probability})
            # Accuracy is computed on the greedy inference path, with
            # dropout disabled (keep_prob fed as 1.0).
            batch_train_logits = sess.run(
                inference_logits,
                {input_data: source_batch, keep_prob: 1.0})
            batch_valid_logits = sess.run(
                inference_logits,
                {input_data: valid_source, keep_prob: 1.0})
            train_acc = get_accuracy(target_batch, batch_train_logits)
            valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
            end_time = time.time()
            print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
                  .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_path)
    print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""
Explanation: Save Parameters
Save the batch_size and save_path parameters for inference.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def id_or_unknown(word, vocab_to_int):
    """Return the id for *word*, falling back to the <UNK> id when the word
    is out of vocabulary."""
    # EAFP: a single lookup on the common (in-vocabulary) path; the <UNK>
    # fallback is only evaluated on a miss.
    try:
        return vocab_to_int[word]
    except KeyError:
        return vocab_to_int['<UNK>']
def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids
    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
    # Lowercase, split on whitespace, and map each token to its id,
    # substituting the <UNK> id for out-of-vocabulary tokens.
    return [vocab_to_int[token] if token in vocab_to_int else vocab_to_int['<UNK>']
            for token in sentence.lower().split()]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
"""
Explanation: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary, to the <UNK> word id.
End of explanation
"""
# Sentence to translate (converted to word ids just below).
translate_sentence = 'he saw a old yellow truck .'

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_path + '.meta')
    loader.restore(sess, load_path)
    # Fetch the named tensors saved when the training graph was built.
    input_data = loaded_graph.get_tensor_by_name('input:0')
    logits = loaded_graph.get_tensor_by_name('logits:0')
    keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
    # Dropout off (keep_prob 1.0) for inference; batch of one sentence.
    translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
"""
Explanation: Translate
This will translate translate_sentence from English to French.
End of explanation
"""
|
dipanjank/ml | algorithms/0_1_knapsack.ipynb | gpl-3.0 | import pandas as pd
import numpy as np

# Toy instance of the 0-1 knapsack problem.
weights = [5, 4, 6, 3]
values = [10, 50, 30, 50]
W = 10

# One row per item (1-indexed) holding its weight and value.
items_df = pd.DataFrame(
    {'weights': weights, 'values': values},
    index=range(1, len(weights) + 1),
)
items_df

# Memo table: optimal_weights.loc[i, j] will hold the best total value
# achievable using the first i items under weight budget j.
optimal_weights = pd.DataFrame(0, index=range(len(weights) + 1), columns=range(W + 1))
optimal_weights
"""
Explanation: <h1 align="center">0-1 Knapsack Problem using Dynamic Programming</h1>
Formal Description
Given two n-tuples of positive numbers $[v_1, v_2, ..., v_n]$ (values) and $[w_1, w_2, ..., w_n]$ (weights), find the subset of items that maximizes $\sum_i v_i$ subject to the constraint $\sum_i w_i \leq W$.
This problem is called 0-1 Knapsack problem because the i-th item is either in the optimal subset or it's not.
If fractions of the items are allowed, then there's a simple Greedy solution:
Calculate $u_i = \frac {v_i} {w_i}$ for each $i$.
Sort the items by the decreasing order of $u_i$.
Take each item $u_i$ until W runs out. If we can't take the entire item, take the fraction $f$ such that addition of $f*w_i$ keeps the sum of weights in the optimal subset <= W.
The brute force approach requires generating all possible combinations ($2^n-1$) which is exponential in n.
Optimal Substructure Step
The optimal substructure property of the Dynamic Programming solution for the problem can be expressed thus:
Let's say at any given point, $S$ is a potential optimal subset excluding item $i$ and $V(S, w)$ is the maximum value for a weight contraint $w$. Then, the maximum value of the potential optimal subset including item $i$ is:
$$
V(S \cup {item_i}, w) =
\begin{cases}
V(S, w), \text{if} \space w_i \gt w \
max(V(S, w), V(S \cup {item_i}, w - w_i)), \text{otherwise}
\end{cases}
$$
To implement this algorithm in code, we'll use a 2D array (pandas DataFrame) with one row per item and W+1 columns. Each cell in $row_i, column_j$ represents the maximum value using the first $i$ items subject to weight constraint $j$.
End of explanation
"""
# Fill the memo table row by row; row i depends only on row i-1.
for item_idx in optimal_weights.index[1:]:
    item_value, item_weight = items_df.loc[item_idx, ['values', 'weights']]
    for budget in range(1, W + 1):
        best_without = optimal_weights.loc[item_idx - 1, budget]
        if item_weight > budget:
            # Item does not fit: the optimum is unchanged from the row above.
            best = best_without
        else:
            # Take the better of skipping the item, or including it plus
            # the optimum for the remaining budget.
            best_with = optimal_weights.loc[item_idx - 1, budget - item_weight] + item_value
            best = max(best_without, best_with)
        optimal_weights.loc[item_idx, budget] = best
optimal_weights
"""
Explanation: Now we're ready to implement the DP step.
End of explanation
"""
optimal_weights.loc[len(items_df), W]
"""
Explanation: The final result is the last element of optimal_weights.
End of explanation
"""
|
datascience-practice/data-quest | python_introduction/beginner/booleans-and-if-statements.ipynb | mit | cat = True
dog = False
# Booleans are their own type: this prints <class 'bool'>.
print(type(cat))
"""
Explanation: 1: Booleans
Instructions
Assign the value True to the variable cat and the value False to the variable dog. Then use the print() function and the type() function to display the type for cat.
Answer
End of explanation
"""
from cities import cities
print(cities)
# Equality comparisons between list elements and strings yield Booleans.
first_alb = cities[0] == 'Albuquerque'
second_alb = cities[1] == 'Albuquerque'
first_last = cities[0] == cities[-1]
print(first_alb, second_alb, first_last)
"""
Explanation: 2: Boolean operators
Instructions
Use the Boolean operators to determine if the following pairs of values are equivalent:
first element of cities and the string "Albuquerque". Assign the resulting Boolean value to first_alb
second element of cities and the string "Albuquerque". Assign the resulting Boolean value to second_alb
first element of cities and the last element in cities. Assign the resulting Boolean value to first_last
End of explanation
"""
# Crime rates from the dataset, one integer per city.
crime_rates = [749, 371, 828, 503, 1379, 425, 408, 542, 1405, 835, 1288, 647, 974, 1383, 455, 658, 675, 615, 2122, 423, 362, 587, 543, 563, 168, 992, 1185, 617, 734, 1263, 784, 352, 397, 575, 481, 598, 1750, 399, 1172, 1294, 992, 522, 1216, 815, 639, 1154, 1993, 919, 594, 1160, 636, 752, 130, 517, 423, 443, 738, 503, 413, 704, 363, 401, 597, 1776, 722, 1548, 616, 1171, 724, 990, 169, 1177, 742]
print(crime_rates)
# first is 749, and the last element is 742, so all three are True.
first = crime_rates[0]
first_500 = first > 500
first_749 = first >= 749
first_last = first >= crime_rates[-1]
print(first_500, first_749, first_last)
"""
Explanation: 3: Booleans with greater than
Instructions
The variable crime_rates is a list of integers containing the crime rates from the dataset. Perform the following comparisons:
evaluate if the first element in crime_rates is larger than the integer 500, assign the Boolean result to first_500
evaluate if the first element in crime_rates is larger than or equal to 749, assign the Boolean result to first_749
evaluate if the first element in crime_rates is greater than or equal to the last element in crime_rates, assign the Boolean result to first_last
Answer
End of explanation
"""
# Same comparisons with the less-than operators on the second element (371).
second = crime_rates[1]
second_500 = second < 500
second_371 = second <= 371
second_last = second <= crime_rates[-1]
print(second_500, second_371, second_last)
"""
Explanation: 4: Booleans with less than
Instructions
The variable crime_rates is a list containing the crime rates from the dataset as integers. Perform the following comparisons:
* determine if the second element in crime_rates is smaller than the integer 500, assign the Boolean result to second_500
* determine if the second element in crime_rates is smaller than or equal to 371, assign the Boolean result to second_371
* determine if the second element in crime_rates is smaller than or equal to the last element in crime_rates, assign the Boolean result to second_last
Answer
End of explanation
"""
result = 0
# Flag whether the third city is Anchorage. (The redundant u"" prefix is
# dropped: all string literals are Unicode in Python 3.)
if cities[2] == "Anchorage":
    result = 1
assert result == 1
"""
Explanation: 5: If statements
Instructions
Determine if the third element in cities is equivalent to the string "Anchorage". If it is equivalent, change the variable result to 1.
Answer
End of explanation
"""
# Fixed two defects: the accumulator was misspelled ('reqults'), which
# left 'results' undefined whenever the conditions failed, and the inner
# check must look at the *second* element per the exercise instructions.
results = 0
if crime_rates[0] > 500:
    if crime_rates[1] > 300:
        results = 3
"""
Explanation: 6: Nesting if statements
Instructions
Nest if statements in the following order:
first one checks if the first element in crime_rates is larger than 500
second one checks if the second element in crime_rates is larger than 300
if both statements evaluate to True, assign the value 3 to the variable results
Answer
End of explanation
"""
# Collect every crime rate strictly greater than 500.
five_hundred_list = [rate for rate in crime_rates if rate > 500]
assert all(rate > 500 for rate in five_hundred_list)
"""
Explanation: 7: If statements and for loops
Instructions
Create a new list, five_hundred_list, that contains only the elements from crime_rates that are greater than 500. To accomplish this, you'll need a for loop and an if statement:
the for loop specifies which list we want to iterate over and the name of the iterator variable (we use cr in our answer)
the if statement determines if the current element (cr) is larger than 500
if the current element (cr) is larger than 500, use the append() method to add it to five_hundred_list
Answer
End of explanation
"""
print(crime_rates)
# Scan once, keeping the largest value seen so far; starting the scan at
# the second element is equivalent since highest begins as the first.
highest = crime_rates[0]
for candidate in crime_rates[1:]:
    if candidate > highest:
        highest = candidate
"""
Explanation: 8: Find the highest crime rate
Instructions
Now [...] we can find the highest crime rate. crime_rates is a list of integers where each integer is a crime rate.
One strategy is to:
assign the value at index 0 from crime_rates to a new integer variable called highest
use a for loop to compare each value in crime_rates to highest and assign that value to highest if it's larger
Find the largest integer in crime_rates using the strategy we just discussed and assign that value to the variable highest.
Answer
End of explanation
"""
|
lukassnoek/ICON2017 | tutorial/ICON2017_tutorial_answers.ipynb | mit | # First, we need to import some Python packages
import numpy as np
import pandas as pd
import os.path as op
import warnings
import matplotlib.pyplot as plt
plt.style.use('classic')
warnings.filterwarnings("ignore")
%matplotlib inline
# The onset times are loaded as pandas dataframe with three columns:
# onset times (in seconds) (column 1), durations (column 2), and conditions (column 3).
# N.B.: condition 0 = passive, condition 1 = active
# The file is tab-separated and has no header row, so column names are supplied here.
stim_info = pd.read_csv(op.join('example_data', 'onsets.csv'), sep='\t',
                        names=['onset', 'duration', 'trial_type'])
"""
Explanation: ICON 2017 workshop: MVPA of fMRI data in Python (ANSWERS)
Notebook with answers to the ToDo/ToThink questions.
<div class='alert alert-info'>
**ToThink**: Decide for the following scenarios whether they correspond to a **within-subject** or **between-subject** design!<br><br>
1. Subjects view a sequence of negative (violence, mutilation, war scenes) images during fMRI acquisition. One group of subjects is administered [propranolol](https://en.wikipedia.org/wiki/Propranolol) before the fMRI session and another group a placebo. The researcher in charge wants to see whether patterns in the insula change in response to propranolol using a pattern-based analysis. <br><br>
2. Subjects view images of scenes with either animate of inanimate objects during fMRI acquisition. The researcher wants to decode "animacy" from patterns in the lateral occipital cortex.
</div>
Answer
This is a between-subject design!
This is a within-subject design!
End of explanation
"""
# Convert onsets/durations from seconds to TRs (TR = 2 s) and cast to int
# so they can be used directly as indices on the time axis.
stim_info[['onset', 'duration']] = (stim_info[['onset', 'duration']] / 2).astype(int)
"""
Explanation: Remember, the onsets (and duration) are here defined in seconds (not TRs). Let's assume that the fMRI-run has a TR of 2. Now, we can convert (very easily!) the onsets/durations-in-seconds to onsets/durations-in-TRs.
End of explanation
"""
# ANSWER
# Build a (timepoints x trials) design matrix: column `trial_idx` gets a
# single 1 at the TR where that trial's encoding phase started (an "impulse").
n_trials = 40
stim_vec = np.zeros((162, n_trials))
for trial_idx, onset_tr in enumerate(stim_info['onset'][:n_trials]):
    stim_vec[onset_tr, trial_idx] = 1
np.testing.assert_array_equal(stim_vec, np.load('example_data/stim_vec.npy'))
print("Well done!")
"""
Explanation: To perform the first-level analysis, for each regressor (trial) we need to create a regressor of zeros and ones, in which the ones represent the moments in which the particular trial was presented. Let's assume that our moment of interest is the encoding phase, which lasts only 2 seconds; we thus can model it as an "impulse".
So, for example, if you have a (hypothetical) run with a total duration of 15 TRs, and you show a stimulus at TR=3 for the duration of 1 TRs (i.e. 2 seconds), then you'd code your regressor as:
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
<div class='alert alert-warning'>
**ToDo**: Below, we initialized a stimulus vector (`stim_vec`) of shape=[162, 40], i.e. timepoints x trials (this run was 162 TRs long), with zeros. Each of the 40 rows represents one trial. Loop over the colums of the `stim_vec` matrix and fill the times at onset till the onset + 2 TRs with ones. Remember, the first index in Python is zero (not 1!).
</div>
End of explanation
"""
from glob import glob
"""
Explanation: 1.4.1 tips & tricks to load and transform (nifti-)files
As a first thing, we need to find all the paths to the t-stat nifti-files. Python has a nifty (pun intended) tool called "glob" which can find files/directories on disk using wildcards. It is usually imported as follows:
End of explanation
"""
import os
# Build the pattern in an OS-agnostic way: 'img/*.png' on Linux/Mac but
# 'img\*.png' on Windows — os.sep supplies the platform's separator.
png_pattern = 'img' + os.sep + '*.png'
png_files = glob(png_pattern)
print(png_files)
"""
Explanation: glob, in Python, is a function that takes a path (as a string) with one or more wildcard characters (such as the *) and searches for files/directories on disk that match that. For example, let's try to find all the png-imagesin the "img" directory using glob (these are the images that I used inside this notebook).
End of explanation
"""
# ANSWER
tstat_paths = glob(op.join('..', 'data', 'pi0070', 'wm.feat', 'stats', 'tstat*.nii.gz'))
# To check your answer, run this cell
assert(len(tstat_paths) == 40)
print("Well done! You globbed all the 40 tstat-files correctly!")
"""
Explanation: As you can see, it returns a list with all the files/directories that matched the search-string. Note that you can also search files outside of the current directory. To do so, we can simply specify the relative or absolute path to it.
<div class='alert alert-warning'>
**ToDo**: Now you have the skills to actually "glob" all the *t*-stats from subject `pi0070` yourself! Use glob to find all the paths to the t-stats and store the results (a list with 40 strings) in a variable called `tstat_paths`. Note: the data directory is one directory above the current directory! Hint: watch out! There might be an `ftest.nii.gz` file in the stats-directory ...
</div>
End of explanation
"""
# Let's fix it
# glob() returns paths lexicographically (tstat10 before tstat2), so we
# re-sort them numerically with the helper from functions.py.
from functions import sort_nifti_paths
tstat_paths = sort_nifti_paths(tstat_paths)
import nibabel as nib
# Load the first t-stat map as a 3D numpy array to inspect it.
data = nib.load(tstat_paths[0]).get_data()
"""
Explanation: To fix this issue, we wrote a little function (sort_nifti_paths()) that sorts the paths correctly. (If you're interested in how it works, check out the functions.py file.)
End of explanation
"""
# ANSWER
voxel_dims = (80, 80, 37) # The data is in EPI-space
# Preallocate the samples-by-features matrix: one row per trial, one column
# per voxel (each 3D volume is flattened to a single feature vector).
X = np.zeros((len(tstat_paths), np.prod(voxel_dims)))
for trial, tstat_path in enumerate(tstat_paths):
    data = nib.load(tstat_path).get_data()
    data = data.ravel()  # flatten 3D volume -> 1D feature vector
    X[trial,:] = data
# Verify the assembled matrix against the stored reference before part 2.
np.testing.assert_almost_equal(X, np.load('example_data/X_section1.npz')['X'])
print("Well done!")
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: in the code block below, write a loop that loads in the tstat nifti-files one by one (using nibabel) and store them in the already preallocated array "X". Note that "X" is a 2D matrix (samples-by-features), but each tstat-file contains a 3D array, so you need to "flatten" the 3D array to a single vector: use e.g. the numpy function "flatten()" or "ravel()".
</div>
End of explanation
"""
# ANSWER
# The trial-type column (0 = PASSIVE, 1 = ACTIVE) is the dependent variable y.
y = stim_info['trial_type'].values
"""
Explanation: Part 2. Multivoxel pattern analysis
2.1 Adding the dependent variable y
In Section 1.3, you ended with a nice 2D-matrix of N-samples x N-features. This 2D-matrix contains all whole-brain patterns of t-values for all trials: this is your X. However, this leaves out a crucial part of the data: the actual feature-of-interest, trial type, your y.
While there is kind of a generic way to load in voxel patterns, there is usually not a single way to load in your dependent variable (y), because the exact factor that represents y dependent on your exact research question (and also depends how you have stored this data on disk).
In within-subject single-trial designs, trial type or condition is often the dependent variable. The dependent variable can thus be extracted from your design. In fact, we already loaded the dependent variable previously, in the onsets variable (see Section 1.2). The third column (named 'trial_type') contains the trial types, where 1 is an "ACTIVE" trial and 0 a "PASSIVE" trial.
<div class='alert alert-warning'>
**ToDo**: Extract vector *y* from `onsets`, which only contains the trial types (i.e., y = [1, 1, 1, 0, ..., 0, 1, 1])
</div>
End of explanation
"""
# Compare the extracted labels against the stored reference.
np.testing.assert_equal(np.array(y), np.load('example_data/y.npy'))
print('Well done!')
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler() # Here we initialize the StandardScaler object
scaler.fit(X) # Here we "fit" the StandardScaler to our entire dataset (i.e. calculates means and stds of each feature)
X = scaler.transform(X) # And here we transform the dataset using the calculated means/stds
"""
Explanation: Check your answer
End of explanation
"""
# After StandardScaler every feature should have mean 0 ...
means = np.mean(X, axis=0)
np.testing.assert_almost_equal(means, np.zeros(X.shape[1]))
print("Each column (feature) has mean 0!")
# ... and std 1; features with std 0 (e.g. voxels outside the brain) are
# left untouched by the scaler, so they are excluded from the check.
stds = X.std(axis=0)
np.testing.assert_almost_equal(stds[stds != 0], np.ones((stds != 0).sum()))
print("Each column (feature) has std 1!")
# Scikit-learn is always imported as 'sklearn'
from sklearn.svm import SVC
# clf = CLassiFier
clf = SVC(kernel='linear')
print('Fitting SVC ...', end='')
clf.fit(X, y)
print(' done.')
# For a linear SVM there is one coefficient (weight) per feature.
coefs = clf.coef_
print("Shape of coefficients: %r" % (coefs.shape,))
# NOTE: predicting the data the model was fit on — not cross-validated!
y_hat = clf.predict(X)
print("The predictions for my samples are:\n %r" % y_hat.tolist())
"""
Explanation: Did the scaling procedure work? Let's check that below (by asserting that the mean of each column is 0, and the std of each column is 1):
End of explanation
"""
# ANSWER
# Every scikit-learn estimator exposes the same fit/predict API, so swapping
# the SVC for an LDA classifier requires no other changes. fit() returns the
# estimator itself, so we can chain the assignment.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
lda = LinearDiscriminantAnalysis().fit(X, y)
lda.predict(X)
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: The cool thing about scikit-learn is that their objects have a very consistent API and have sensible defaults. As a consequence, *every* model ("estimator" in scikit-learn terms) is used in the same way using the `fit(X, y)` and `predict(X)` methods. Try it out yourself below!<br><br> Try using the [LinearDiscriminantAnalysis](http://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html#sklearn.discriminant_analysis.LinearDiscriminantAnalysis) class (`from sklearn.discriminant_analysis import LinearDiscriminantAnalysis`) or [GaussianNB](http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html#sklearn.naive_bayes.GaussianNB) ("Naive Bayes" classifier; `from sklearn.naive_bayes import GaussianNB`) for example (or any other!). All methods should work exactly the same as the previous example using the SVM.
</div>
End of explanation
"""
# ANSWER
accuracy = (y_hat==y).mean()
print(accuracy)
# or alternatively
accuracy = (y_hat == y).sum() / y.size # or: len(y)
# or using scikit-learn metrics
from sklearn.metrics import accuracy_score
accuracy_score(y, y_hat)
from sklearn.model_selection import train_test_split
if not isinstance(y, np.ndarray):
y = np.array(y)
# The argument "test_size" indicates the test-size as a proportion
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, stratify=y,
random_state=5)
print("Shape X_train: %r" % (X_train.shape,))
print("Shape y_train: %r" % (y_train.shape,))
print("Shape X_test: %r" % (X_test.shape,))
print("Shape y_test: %r" % (y_test.shape,))
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: Can you calculate the accuracy of the above model? Hint 1: you need to compare the true labels (i.e. y) with the predicted labels (i.e. y_hat). Hint 2: if you do arithmetic with boolean values (i.e. `True` and `False`), `True` is interpreted as 1 and `False` is interpreted as 0.
</div>
End of explanation
"""
# ANSWER
is_equal_ratio = y_train.mean() == y_test.mean()
print("Is the class-ratio the same? --> %s (namely %.1f)" % (is_equal_ratio, y_train.mean()))
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: To convince yourself that the `stratify` option does what it supposed to do, check whether the class-ratio is the same for the train and test-set.
</div>
End of explanation
"""
# ANSWER
clf.fit(X=X_train, y=y_train)
y_hat_train = clf.predict(X=X_train)
y_hat_test = clf.predict(X=X_test)
print("Accuracy train: %.3f" % (y_hat_train == y_train).mean())
print("Accuracy test: %.3f" % (y_hat_test == y_test).mean())
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: Fit your model on `X_train` and `y_train` and then predict `X_test`. Calculate both the accuracy on the train-set (fit and predict on train) *and* the cross-validated accuracy (fit on train, predict on test). Compare the two accuracies - are we overfitting the model?
</div>
End of explanation
"""
# ANSWER
# Fixed: roc_auc_score had not been imported yet at this point in the
# notebook (its first import was several cells later), so running the cells
# top-to-bottom raised a NameError. Import it here before use.
from sklearn.metrics import roc_auc_score

clf.fit(X=X_train, y=y_train)
y_hat_train = clf.predict(X=X_train)
y_hat_test = clf.predict(X=X_test)
print("ROC-AUC train: %.3f" % roc_auc_score(y_train, y_hat_train))
print("ROC-AUC test: %.3f" % roc_auc_score(y_test, y_hat_test))
# scikit-learn is imported as 'sklearn'
from sklearn.model_selection import StratifiedKFold
# They call folds 'splits' in scikit-learn
skf = StratifiedKFold(n_splits=5)
# split() yields one (train_indices, test_indices) tuple per fold.
folds = skf.split(X, y)
# Notice how we "unpack" the train- and test-indices at the start of the loop
i = 1
for train_idx, test_idx in folds:
    print("Processing fold %i" % i)
    print("Train-indices: %s" % train_idx)
    print("Test-indices: %s\n" % test_idx)
    i += 1
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: Evaluate the model from the previous ToDo with the ROC-AUC-score metric instead of accuracy. Check both the performance on the train-set (which should still be 1.0) and the (cross-validated) performance on the test-set.
Notice the drop in performance compared to using accuracy!
</div>
End of explanation
"""
# EXAMPLE ANSWER
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
# clf now is a logistic regression model
clf = LogisticRegression()
# run split() again to generate folds
folds = skf.split(X, y)
# One performance value per fold.
performance = np.zeros(skf.n_splits)
for i, (train_idx, test_idx) in enumerate(folds):
    # Index rows of X / entries of y with the fold's indices.
    X_train = X[train_idx,:]
    y_train = y[train_idx]
    X_test = X[test_idx,:]
    y_test = y[test_idx]
    # ToDo: call fit (on train) and predict (on test)
    model = clf.fit(X=X_train, y=y_train)
    y_hat = model.predict(X=X_test)
    # ToDo: calculate accuracy
    performance[i] = roc_auc_score(y_test, y_hat)
# ToDo: calculate average accuracy
print('Mean performance: %.3f' % np.mean(performance))
# Simulated data: 80 samples x 1000 features of Gaussian noise ...
X_r = np.random.randn(80, 1000)
print("Shape of X: %s" % (X_r.shape, ), '\n')
# ... with alternating 0/1 labels ...
y_r = np.tile([0, 1], 40)
print("Shape of y: %s" % (y_r.shape, ))
print("Y labels:\n%r" % y_r.tolist(), '\n')
# ... and 4 "runs" of 20 consecutive samples each (the grouping variable).
runs = np.repeat([1, 2, 3, 4], 20)
print("Shape of runs: %s" % (runs.shape, ))
print("Run-indices: \n%r" % runs.tolist())
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: in the code-cell below, complete the statements by indexing X and y to create four different objects in every fold: X_train, y_train, X_test, y_test. Also, we created a new classifier-object (clf) for you based on a different model: scikit-learn's `LogisticRegression` to show you that *every* model in scikit-learn works the same (i.e. has the same `fit` and `predict` methods). <br><br>
Use this classifier to fit on the train-set and predict the test-set in every fold. Then, calculate the (cross-validated) performance (e.g. ROC-AUC-score) in every fold. Keep track of the accuracies across folds, and after the loop over folds, calculate the average performance across folds.
</div>
End of explanation
"""
# Import from model_selection module
from sklearn.model_selection import GroupKFold
# In fact, when we initialize GroupKFold with 4 splits, as below, it is exactly the same as
# the LeaveOneGroupOut cross-validator, since we only have 4 groups
gkf = GroupKFold(n_splits=4)
# Each fold holds out all samples belonging to one group ("run").
for train_idx, test_idx in gkf.split(X=X_r, y=y_r, groups=runs):
    print("Indices of our test-samples: %r" % test_idx.tolist())
    print("... which correspond to following runs: %r" % runs[test_idx].tolist(), '\n')
"""
Explanation: Now, scikit-learn offers a nice cross-validation class that partitions your data according to a "grouping" variable: GroupKFold, or variations thereof like LeaveOneGroupOut of LeavePGroupsOut. Let's check out how that can be used using our simulated data:
End of explanation
"""
# ANSWER
# 100 random stratified splits, each holding out 20% of the samples.
from sklearn.model_selection import StratifiedShuffleSplit
sss = StratifiedShuffleSplit(n_splits=100, test_size=0.2)
"""
Explanation: <div class='alert alert-info'>
**ToThink**: While stratification can be ensured by using e.g. `StratifiedKFold` cross-validators, stratification cannot be guaranteed when using `GroupKFold`. Why do you think this is the case?
</div>
Answer
Because you're "conditioning" your folds on your runs. If your samples are imbalanced within runs, stratified folds cannot be guaranteed.
<div class='alert alert-warning'>
**ToDo**: Below, try to implement a *repeated random splits* cross-validation routine using `StratifiedShuffleSplit` with 100 repetitions and a test-size of 0.2! Check out the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html#sklearn.model_selection.StratifiedShuffleSplit) for more info about its parameters.
</div>
End of explanation
"""
from sklearn.feature_selection import SelectKBest, f_classif
# f_classif is a scikit-learn specific implementation of the F-test
# Keep the 2000 features (voxels) with the highest F-scores.
select2000best = SelectKBest(score_func=f_classif, k=2000)
from sklearn.feature_selection import SelectFwe, chi2
# Alternative: keep only features significant at alpha=0.01 after
# family-wise-error correction of the chi2 test.
selectfwe_transformer = SelectFwe(score_func=chi2, alpha=0.01)
"""
Explanation: <div class='alert alert-info'>
**ToThink**: Suppose a researcher wants to decode gratings with two different orientations from V1. To delineate V1, the subject underwent a retinotopy session in a *different* fMRI run. The data from this retinotopy session was subsequently used to extract ("mask") V1 by excluding non-significant voxels; the significant voxels were in turn used to base the orientation decoding analysis on. <br><br>Is masking V1 using the retinotopy data a form of *feature selection* or *feature extraction*? Why?
</div>
Answer
This is feature-selection, because it selects a subset of voxels, and it doesn't transform features into (lower dimensional) components.
<div class='alert alert-info'>
**ToThink**: What could be a reason to prefer feature selection above feature extraction? And the other way around?
</div>
Answer
Often, feature-selection is more interpretable, because your features are still expressed as a set of voxels. Using PCA-components of voxels, for example, are slightly less interpretable, because they're expressed as a weighted combination of voxels.
End of explanation
"""
# Fit the transformer ...
select2000best.fit(X, y)
# ... which calculates the following attributes (.scores_ and .pvalues_)
# Let's check them out
scores = select2000best.scores_
pvalues = select2000best.pvalues_
# As you can see, each voxel gets its own score (in this case: an F-score)
print(scores.size)
# and its own p-value:
print(pvalues.size)
import matplotlib.pyplot as plt
%matplotlib inline
# Reshape the flat score vector back to the 3D EPI dimensions so we can
# plot sagittal slices of the F-score map.
scores_3d = scores.reshape((80, 80, 37))
plt.figure(figsize=(20, 5))
for i, slce in enumerate(np.arange(15, 65, 5)):
    plt.subplot(2, 5, (i+1))
    plt.title('X = %i' % slce, fontsize=20)
    plt.imshow(scores_3d[slce, :, :].T, origin='lower', cmap='hot')
    plt.axis('off')
plt.tight_layout()
plt.show()
"""
Explanation: But how does this work in practice? We'll show you an (not cross-validated!) example using the select100best transformer initialized earlier:
End of explanation
"""
# ANSWER
# Fit PCA on the train-half only, then apply the learned projection to both
# halves (fitting on all data would leak test information into the components).
from sklearn.decomposition import PCA
X_train_tmp, X_test_tmp = train_test_split(X, test_size=0.5)
pca = PCA(n_components=5)
pca.fit(X_train_tmp)
X_train_pca_transformed = pca.transform(X_train_tmp)
X_test_pca_transformed = pca.transform(X_test_tmp)
print("Shape X_train (after PCA): %s" % (X_train_pca_transformed.shape,))
print("Shape X_test (after PCA): %s" % (X_test_pca_transformed.shape,))
"""
Explanation: <div class='alert alert-info'>
**ToThink**: Given the image above, what is the major difference between data driven feature selection (like UFS) and ROI-based feature selection (e.g. only look at patterns in the amygdala) in terms of the spatial scale of patterns you'll select? Try to think of an advantage of UFS over ROI-based feature selection and vice versa.
</div>
Answer
UFS is blind to spatial scale, so you can extract a spatially distributed set of voxels, while ROI-based feature-selection (usually) assumes a spatially contiguous set of voxels.
<div class='alert alert-warning'>
**ToDo**: Below, we import the `PCA` class from scikit-learn. Check out the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html). Now, initialize an object from this `PCA`-class with the number of components to store set to 5, and subsequently fit in on `X_train_tmp` and subsequently call transform on `X_train_tmp` and `X_test_tmp` separately; store the result in the variables `X_train_pca_transformed` and `X_test_pca_transformed`. Then, check out the shape of `X_train_pca_transformed`: does it have the shape you expected?
</div>
End of explanation
"""
# ANSWER
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
folds = skf.split(X, y)
performance = np.zeros(skf.n_splits)
select1000best = SelectKBest(score_func=f_classif, k=1000)
for i, (train_idx, test_idx) in enumerate(folds):
    X_train = X[train_idx,:]
    X_test = X[test_idx,:]
    y_train = y[train_idx]
    y_test = y[test_idx]
    # Fit the selector on the train-set only; transform both partitions with
    # the train-derived feature mask (no leakage into the test-set).
    select1000best.fit(X=X_train, y=y_train)
    X_train = select1000best.transform(X=X_train)
    X_test = select1000best.transform(X=X_test)
    # ToDo: calculate accuracy
    clf.fit(X=X_train, y=y_train)
    y_test_hat = clf.predict(X=X_test)
    performance[i] = roc_auc_score(y_test, y_test_hat)
# ToDo: calculate average accuracy
print('Mean performance: %.3f' % np.mean(performance))
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# Pipeline steps: scale -> univariate feature selection -> PCA -> linear SVM.
scaler = StandardScaler()
ufs = SelectKBest(score_func=f_classif, k=1000)
pca = PCA(n_components=10) # we want to reduce the features to 10 components
svc = SVC(kernel='linear')
from sklearn.pipeline import Pipeline
pipeline_to_make = [('preproc', scaler),
                    ('ufs', ufs),
                    ('pca', pca),
                    ('clf', svc)]
my_pipe = Pipeline(pipeline_to_make)
# Odd/even split: even-indexed trials train, odd-indexed trials test.
X_train, y_train = X[0::2], y[0::2]
X_test, y_test = X[1::2], y[1::2]
my_pipe.fit(X_train, y_train)
predictions = my_pipe.predict(X_test)
performance = roc_auc_score(y_test, predictions)
print("Cross-validated performance on test-set: %.3f" % performance)
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: Below, we set up a K-fold cross-validation loop and prespecified a classifier (`clf`, a logistic regression model) and a transformer (`select1000best`, selecting the 1000 best features based upon an F-test). Now, it's up to you to actually implement the feature selection inside the for-loop. Make sure to fit the transformer only on the train-set, but then transform *both* the train-set and the test-set. Then, fit the model on the transformed train-set and cross-validate to the transformed test-set. Calculate performance (metric of your choice) of the cross-validated model for each fold, and after all folds calculate the average performance (across folds).
</div>
End of explanation
"""
# ANSWER
from sklearn.feature_selection import VarianceThreshold
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier

# Pipeline: drop zero-variance voxels (outside the brain) -> cluster voxels
# into 8 features -> scale -> random forest.
# (Fixed: the ToDo asks for 8 clusters; the original answer used 15.)
pipe = Pipeline([
    ('varthres', VarianceThreshold()),
    ('cluster', KMeans(8)),
    ('scaler', StandardScaler()),
    ('clf', RandomForestClassifier())
])

pipe.fit(X_train, y_train)
predictions = pipe.predict(X_test)
performance = roc_auc_score(y_test, predictions)
print("Cross-validated performance on test-set: %.3f" % performance)
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: Test your pipeline-skills! Can you build a pipeline that incorporates a [VarianceThreshold](http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.VarianceThreshold.html#sklearn.feature_selection.VarianceThreshold) (removes all voxels outside the brain), K-means clustering (with 8 clusters, such that $K$ voxels --> 8 clusters, using [KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.k_means.html#sklearn.cluster.k_means)), scaling (`StandardScaler`), and a [RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier)?
</div>
End of explanation
"""
# Example answer
# Glob all subject directories (they are named pi<number>).
participant_numbers = glob(op.join('..', 'data', 'pi*'))
print(participant_numbers)
# Next, we need to extract the participant numbers from the paths you just obtained. We do this for you here.
# Fixed: splitting on '/' breaks on Windows, where glob returns paths with
# '\\' separators; op.basename handles the platform separator correctly
# (consistent with the os.sep handling earlier in this notebook).
participant_numbers = [op.basename(x) for x in participant_numbers]
print('\nThere are the participant numbers:\n%s' % participant_numbers)
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: Before we start, we need to get the participant numbers of all our participants. To find these, we can make use of the directory names. In Section **1.4.1** you learned the tricks you need to get all subdirectory names of the `data` directory using a wildcard!
</div>
End of explanation
"""
# ANSWER
skf = StratifiedKFold(n_splits=5)
select100best = SelectKBest(f_classif, k=100)
clf = SVC(kernel='linear')
pipe = Pipeline([('scaler', StandardScaler()), ('ufs', select1000best), ('clf', clf)])
all_performance = np.zeros(len(participant_numbers))
### Start loop over subjects ###
for i_sub, participant_number in enumerate(participant_numbers):
path_this_participant = op.join('..', 'data', participant_number, 'wm.feat', 'stats', 'tstat*.nii.gz')
t_stat_paths_this_participant = sort_nifti_paths(glob(path_this_participant))
voxel_dims = nib.load(t_stat_paths_this_participant[0]).header.get_data_shape()
X = np.zeros((len(t_stat_paths_this_participant), np.prod(voxel_dims)))
for trial, tstat_path in enumerate(t_stat_paths_this_participant):
data = nib.load(tstat_path).get_data()
data = data.ravel()
X[trial,:] = data
performance_this_participant = np.zeros(skf.n_splits)
# Loop over the folds
for i_fold, (train_idx, test_idx) in enumerate(skf.split(X, y)):
X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]
pipe.fit(X_train, y_train)
preds = pipe.predict(X_test)
performance = roc_auc_score(y_test, preds)
performance_this_participant[i_fold] = performance
mean_performance_this_participant = performance_this_participant.mean()
print('Mean performance for participant %s: %.3f' % (participant_number, mean_performance_this_participant))
all_performance[i_sub] = mean_performance_this_participant
print('\nFinal performance mean (std): %.3f (%.3f)' % (all_performance.mean(), all_performance.std()))
from scipy.stats import ttest_1samp
"""
Explanation: <div class='alert alert-warning'>
**ToDo** (optional!): This final ToDo is a big one, in which everything we learned so far comes together nicely. Write a loop over all participants, implementing a cross-valdidated classification pipeline including feature selection. We provide you with some "boilerplate" code to start with, but it's up to you to complete the analysis/loop.
<br><br>
Make sure to include the following:
<br>
1. Scale the patterns (even better: cross-validate your scaler)<br>
2. Use 5-fold Stratified cross-validation<br>
3. Within each fold, select the best 100 features using an f_classif Selector<br>
4. Use a SVM to decode condition from these 100 features<br><br>
Optional:<br>
- Use a scikit-learn Pipeline in which you group your scaler, selector, and estimator!<br><br>
Hints:<br>
- The design for each subject was exactly the same, so you can use the same "y" as in the previous example!<br>
</div>
End of explanation
"""
# Example answer
# One-sample t-test of the per-subject performances against chance (0.5).
t, p = ttest_1samp(all_performance, 0.5)
print('The t-value is %.3f, with a p-value of %.5f' % (t, p))
"""
Explanation: <div class='alert alert-warning'>
**ToDo**: Do the one-sample t-test! Can we decode the condition with a significantly above-chance accuracy?
</div>
End of explanation
"""
|
dwhswenson/openpathsampling | examples/misc/move_strategies_and_schemes.ipynb | mit | %matplotlib inline
import openpathsampling as paths
from openpathsampling.visualize import PathTreeBuilder, PathTreeBuilder
from IPython.display import SVG, HTML
import openpathsampling.high_level.move_strategy as strategies # TODO: handle this better
# real fast setup of a small network
cvA = paths.FunctionCV(name="xA", f=lambda s : s.xyz[0][0])
cvB = paths.FunctionCV(name="xB", f=lambda s : -s.xyz[0][0])
stateA = paths.CVDefinedVolume(cvA, float("-inf"), -0.5).named("A")
stateB = paths.CVDefinedVolume(cvB, float("-inf"), -0.5).named("B")
interfacesA = paths.VolumeInterfaceSet(cvA, float("-inf"),
[-0.5, -0.3, -0.1])
interfacesB = paths.VolumeInterfaceSet(cvB, float("-inf"),
[-0.5, -0.3, -0.1])
network = paths.MSTISNetwork(
[(stateA, interfacesA),
(stateB, interfacesB)],
ms_outers=paths.MSOuterTISInterface.from_lambdas(
{interfacesA: 0.0, interfacesB: 0.0}
)
)
"""
Explanation: This document is intended for intermediate to advanced users. It deals with the internals of the MoveStrategy and MoveScheme objects, as well how to create custom versions of them. For most users, the default behaviors are sufficient.
End of explanation
"""
scheme = paths.DefaultScheme(network)
"""
Explanation: MoveStrategy and MoveScheme
After you've set up your ensembles, you need to create a scheme to sample those ensembles. This is done by the MoveStrategy and MoveScheme objects.
OpenPathSampling uses a simple default scheme for any network, in which first you choose a type of move to do (shooting, replica exchange, etc), and then you choose a specific instance of that move type (i.e., which ensembles to use). This default scheme works for most cases, but you might find yourself in a situation where the default scheme isn't very efficient, or where you think you have an idea for a more efficient scheme. OpenPathSampling makes it easy to modify the underlying move scheme.
Definitions of terms
move scheme: for a given simulation, the move scheme is the "move decision tree". Every step of the MC is done by starting with some root move, and tracing a series of decision points to generate (and then accept) a trial.
move strategy: a general approach to building a move scheme (or a subset thereof). SRTIS is a move strategy. Nearest-neighbor replica exchange is a move strategy. All-replica exchange is a move strategy.
So we use "strategy" to talk about the general idea, and "scheme" to talk about a specific implementation of that idea. This document will describe both how to modify the default scheme for one-time modifications and how to develop new move strategies to be re-used on many problems.
For the simplest cases, you don't need to get into all of this. All you need to do is to use the DefaultScheme, getting the move decision tree as follows:
End of explanation
"""
move_vis = paths.visualize.MoveTreeBuilder.from_scheme(scheme)
SVG(move_vis.svg())
"""
Explanation: OpenPathSampling comes with a nice tool to visualize the move scheme. There are two main columns in the output of this visualization: at the left, you see a visualization of the move decision tree. On the right, you see the input and output ensembles for each PathMover.
The move decision tree part of the visualization should be read as follows: each RandomChoiceMover (or related movers, such as OneWayShooting) randomly select one of the movers at the next level of indentation. Any form of SequentialMover performs the moves at the next level of indentation in the order from top to bottom.
The input/output ensembles part shows possible input ensembles to the move marked with a green bar at the top, and possible output ensembles to the move marked with a red bar on the bottom.
The example below shows this visualization for the default scheme with this network.
End of explanation
"""
# example: switching between AllSetRepEx and NearestNeighborRepEx
scheme = paths.DefaultScheme(network)
scheme.append(strategies.AllSetRepExStrategy())
"""
Explanation: MoveSchemes are built from MoveStrategy objects
In the end, you must give your PathSimulator object a single MoveScheme. However, this scheme might involve several different strategies (for example, whether you want to do one-way shooting or two-way shooting is one strategy decision, and it each can be combined with either nearest-neightbor replica exchange strategy or all-replica exchange strategy: these strategy decisions are completely independent.)
Creating a strategy
A strategy should be thought of as a way to either add new PathMovers to a MoveScheme or to change those PathMovers which already exist in some way.
Every MoveStrategy therefore has an ensembles parameter. If the ensembles parameter is not given, it is assumed that the user intended all normal ensembles in the scheme's transitions. Every strategy also has an initialization parameter called group. This defines the "category" of the move. There are several standard categories (described below), but you can also create custom categories (some examples are given later).
Finally, there is another parameter which can be given in the initialization of any strategy, but which must be given as a named parameter. This is replace, which is a boolean stating whether the movers created using this should replace those in the scheme at this point.
Strategy groups
Intuitively, we often think of moves in groups: the shooting moves, the replica exchange moves, etc. For organizational and analysis purposes, we include that structure in the MoveScheme, and each MoveStrategy must declare what groups it applies to. OpenPathSampling allows users to define arbitrary groups (using strings as labels). The standard schemes use the following groups:
'shooting'
'repex'
'pathreversal'
'minus'
Strategy levels
In order to apply the strategies in a reasonable order, OpenPathSampling distinguishes several levels at which move strategies work. For example, one level determines which swaps define the replica exchange strategy to be used (SIGNATURE), and another level determines whether the swaps are done as replica exchange or ensemble hopping (GROUP). Yet another level creates the structures that determine when to do a replica exchange vs. when to do a shooting move (GLOBAL).
When building the move decision tree, the strategies are applied in the order of their levels. Each level is given a numerical value, meaning that it is simple to create custom orderings. Here are the built-in levels, their numeric values, and brief description:
levels.SIGNATURE = 10:
levels.MOVER = 30:
levels.GROUP = 50:
levels.SUPERGROUP = 70:
levels.GLOBAL = 90:
Applying the strategy to a move scheme
To add a strategy to the move scheme, you use MoveScheme's .append() function. This function can take two arguments: the list of items to append (which is required) and the levels associated with each item. By default, every strategy has a level associated with it, so under most circumstances you don't need to use the levels argument.
Now let's look at a specific example. Say that, instead of doing nearest-neighbor replica exchange (as is the default), we wanted to allow all exchanges within each transition. This is as easy as appending an AllSetRepExStrategy to our scheme.
End of explanation
"""
move_vis = paths.visualize.MoveTreeBuilder.from_scheme(scheme)
SVG(move_vis.svg())
"""
Explanation: Now when we visualize this, note the difference in the replica exchange block: we have 6 movers instead of 4, and now we allow the exchanges between the innermost and outermost ensembles.
End of explanation
"""
scheme.append(strategies.NearestNeighborRepExStrategy(), force=True)
move_vis = paths.visualize.MoveTreeBuilder.from_scheme(scheme)
SVG(move_vis.svg())
"""
Explanation: What if you changed your mind, or wanted to go the other way? Of course, you could just create a new scheme from scratch. However, you can also append a NearestNeighborRepExStrategy after the AllSetRepExStrategy and, from that, return to nearest-neighbor replica exchange.
For NearestNeighborRepExStrategy, the default is replace=True: this is required in order to replace the AllSetRepExStrategy. Also, to obtain the new move decision tree, you have to pass the argument rebuild=True. This is because, once you've built the tree once, the function scheme.mover_decision_tree() will otherwise skip building the scheme and return the root of the already-built decision tree. This allows advanced custom changes, as discussed much later in this document.
End of explanation
"""
# example: single replica
"""
Explanation: Combination strategies
OpenPathSampling provides a few shortcuts to strategies which combine several substrategies into a whole.
DefaultMoveStrategy
The DefaultMoveStrategy converts the move scheme to one which follows the default OpenPathSampling behavior.
TODO: note that this isn't always the same as the default scheme you get from an empty move scheme. If other movers exist, they are converted to the default strategy. So if you added movers which are not part of the default for your network, they will still get included in the scheme.
SingleReplicaStrategy
The SingleReplicaStrategy converts all replica exchanges to ensemble hops (bias parameter required). It then reshapes the move decision tree so that is organized by ensemble, TODO
End of explanation
"""
ens00 = network.sampling_transitions[0].ensembles[0]
ens02 = network.sampling_transitions[0].ensembles[2]
extra_repex = strategies.SelectedPairsRepExStrategy(ensembles=[ens00, ens02], replace=False)
scheme = paths.DefaultScheme(network)
scheme.append(extra_repex)
"""
Explanation: Examples of practical uses
In the examples above, we saw how to change from nearest neighbor replica exchange to all (in-set) replica exchange, and we saw how to switch to a single replica move strategy. In the next examples, we'll look at several other uses for move strategies.
Adding a specific extra replica exchange move
In the examples above, we showed how to get either a nearest neighbor replica exchange attempt graph, or an all in-set replica exchange attempt graph. If you want something in-between, there's also the NthNearestNeighborRepExStrategy, which works like those above. But what if (probably in addition to one of these schemes) you want to allow a certain few replica exchange? For example, in a multiple interface set approach you might want to include a few exchanges between interfaces in different sets which share the same initial state.
To do this, we start with an acceptable strategy (we'll assume the default NearestNeighborRepExStrategy is our starting point) and we add more moves using SelectedPairsRepExStrategy, with replace=False.
End of explanation
"""
move_vis = paths.visualize.MoveTreeBuilder.from_scheme(scheme)
SVG(move_vis.svg())
"""
Explanation: Now we have 7 replica exchange movers (5 not including MS-outer), as can be seen in the move tree visualization.
End of explanation
"""
# select the outermost ensemble in each sampling transition
special_ensembles = [transition.ensembles[-1] for transition in network.sampling_transitions]
alternate_shooting = strategies.OneWayShootingStrategy(
selector=paths.UniformSelector(), # TODO: change this
ensembles=special_ensembles
)
# note that replace=True is the default
scheme = paths.DefaultScheme(network)
scheme.movers = {} # TODO: this will be removed, and lines on either side combined, when all is integrated
scheme.append(alternate_shooting)
move_decision_tree = scheme.move_decision_tree()
# TODO: find a way to visualize
"""
Explanation: First crossing shooting point selection for some ensembles
For ensembles which are far from the state, sometimes uniform shooting point selection doesn't work. If the number of frames inside the interface is much larger than the number outside the interface, then you are very likely to select a shooting point inside the interface. If that point is far enough from the interface, it may be very unlikely for the trial path to cross the interface.
One remedy for this is to use the first frame after the first crossing of the interface as the shooting point. This leads to 100% acceptance of the shooting move (every trial satisfies the ensemble, and since there is only one such point -- which is conserved in the trial -- the selection probability is equal in each direction.)
The downside of this approach is that the paths decorrelate much more slowly, since only that one point is allowed for shooting (path reversal moves change which is the "first" crossing, otherwise there would be never be complete decorrelation). So while it may be necessary to do it for outer interfaces, doing the same for inner interfaces may slow convergence.
The trick we'll show here is to apply the first crossing shooting point selection only to the outer interfaces. This can increase the acceptance probability of the outer interfaces without affecting the decorrelation of the inner interfaces.
End of explanation
"""
# example: add extra shooting (in a different group, preferably)
extra_shooting = strategies.OneWayShootingStrategy(
selector=paths.UniformSelector(), # TODO: change this
group='small_step_shooting'
)
scheme = paths.DefaultScheme(network)
scheme.append(extra_shooting)
"""
Explanation: Two different kinds of shooting for one ensemble
In importance sampling approaches like TIS, you're seeking a balance between two sampling goals. On the one hand, most of space has a negligible (or zero) contribution to the property being measured, so you don't want your steps to be so large that your trials are never accepted. On the other hand, if you make very small steps, it takes a long time to diffuse through the important region (i.e., to decorrelate).
One approach which could be used to fix this would be to allow two different kinds of moves: one which makes small changes with a relatively high acceptance probability to get accepted samples, and one which makes larger changes in an attempt to decorrelate.
This section will show you how to do that by adding a small_step_shooting group which uses the first crossing shooting point selection. (In reality, a better way to get this effect would be to use the standard one-way shooting to do the small steps, and use two-way shooting -- not yet implemented -- to get the larger steps.)
End of explanation
"""
move_vis = paths.visualize.MoveTreeBuilder.from_scheme(scheme)
SVG(move_vis.svg())
"""
Explanation: In the visualization of this, you'll see that we have 2 blocks of shooting moves: one is the pre-existing group called 'shooting', and the other is this new group 'small_step_shooting'.
End of explanation
"""
# example: custom subclass of `MoveStrategy`
class RepExShootRepExStrategy(strategies.MoveStrategy):
    """Strategy combining repex + shooting + reverse-order repex in one move.

    Replaces the separate 'repex' and 'shooting' groups (when ``replace`` is
    True) with a single SequentialMover that performs all replica exchanges,
    then all shooting moves, then the replica exchanges again in reverse order.
    """
    # GROUP level: this strategy rearranges existing movers into a new group.
    _level = strategies.levels.GROUP
    # we define an init function mainly to set defaults for `replace` and `group`
    # NOTE(review): the `network` parameter is accepted but never used — kept
    # for signature compatibility; confirm whether it can be dropped upstream.
    def __init__(self, ensembles=None, group="repex_shoot_repex", replace=True, network=None):
        super(RepExShootRepExStrategy, self).__init__(
            ensembles=ensembles, group=group, replace=replace
        )
    def make_movers(self, scheme):
        """Build the single combined mover from the scheme's existing groups.

        scheme: MoveScheme whose 'repex' and 'shooting' groups are combined

        returns: list containing one SequentialMover
        """
        # if we replace, we remove these groups from the scheme.movers dictionary
        if self.replace:
            repex_movers = scheme.movers.pop('repex')
            shoot_movers = scheme.movers.pop('shooting')
        else:
            repex_movers = scheme.movers['repex']
            shoot_movers = scheme.movers['shooting']
        # combine into a list for the SequentialMover: forward repex, all
        # shooting moves, then repex again in the opposite order
        mover_list = repex_movers + shoot_movers + list(reversed(repex_movers))
        combo_mover = paths.SequentialMover(mover_list)
        return [combo_mover]
repex_shoot_repex = RepExShootRepExStrategy()
scheme = paths.DefaultScheme(network)
scheme.append(repex_shoot_repex)
"""
Explanation: RepEx-Shoot-RepEx
One of the mains goals of OpenPathSampling is to allow users to develop new approaches. New move strategies certainly represents one direction of possible research. This particular example also shows you how to implement such features. It includes both implementation of a custom PathMover and a custom MoveStrategy.
Say that, instead of doing the standard replica exchange and shooting moves, you wanted to combine them all into one move by first doing all the replica exchanges in one order, then doing all the shooting moves, then doing all the replica exchanges in the other order.
To implement this, we'll create a custom subclass of MoveStrategy. When making the movers for this strategy, we'll use the built-in SequentialMover object to create the move we're interested in.
End of explanation
"""
# TODO: there appears to be a bug in MoveTreeBuilder with this scheme
move_vis = paths.visualize.MoveTreeBuilder.from_scheme(scheme)
SVG(move_vis.svg())
"""
Explanation: You'll notice that the combo_mover we defined above is within a RandomChoiceMover: that random choice is for the group 'repex_shoot_repex', which has only this one member.
In this, we have used the default replace=True, which removes the old groups for the shooting movers and replica exchange movers. If you would like to keep the old shooting and replica exchange moves around as well, you can use replace=False.
End of explanation
"""
# TODO: This is done differently (and more easily) now
# example: getting into the details
#scheme = paths.DefaultScheme(network)
#move_decision_tree = scheme.move_decision_tree()
#ens = network.sampling_transitions[0].ensembles[-1]
#shooting_chooser = [m for m in move_decision_tree.movers if m.movers==scheme.movers['shooting']][0]
#idx_ens = [shooting_chooser.movers.index(m)
# for m in shooting_chooser.movers
# if m.ensemble_signature==((ens,), (ens,))]
#print shooting_chooser.weights
#for idx in idx_ens:
# shooting_chooser.weights[idx] *= 2
#print shooting_chooser.weights
"""
Explanation: Modifying the probabilities of moves
The DefaultStrategy includes default choices for the probability of making each move type, and then treats all moves within a given type with equal probability. Above, we described how to change the probability of a specific move type; now we're going to discuss changing the probability of a specific move within that type.
One approach would be to create a custom MoveStrategy at the GLOBAL level. However, in this section we're going to use a different paradigm to approach this problem. Instead of using a MoveStrategy to change the MoveScheme, we will manually modify it.
Keep in mind that this involves really diving into the guts of the MoveScheme object, with all the caveats that involves. Although this paradigm can be used in this and other cases, it is only recommended for advanced users.
One you've created the move decision tree, you can make any custom modifications to it that you would desire. However, it is important to remember that modifying certain aspects can lead to a nonsensical result. For example, appending a move to a RandomChoiceMover without also appending an associated weight will lead to nonsense. For the most part, it is better to use MoveStrategy objects to modify your move decision tree. But to make your own MoveStrategy subclasses, you will need to know how to work with the details of the MoveScheme and the move decision tree.
In this example, we find the shooting movers associated with a certain ensemble, and double the probability of choosing that ensemble if a shooting move is selected.
End of explanation
"""
|
nicoguaro/notebooks_examples | elasticity_fdm.ipynb | mit | from sympy import *
from continuum_mechanics.vector import lap, sym_grad
from continuum_mechanics.solids import navier_cauchy, strain_stress
init_printing()
x, y = symbols("x y")
lamda, mu, h = symbols("lamda mu h")
def construct_poly(pts, terms, var="u"):
    """Build the polynomial interpolator over the stencil points ``pts``.

    pts: list of (x, y) stencil coordinates
    terms: monomial basis terms (sympy expressions in x and y)
    var: base name for the nodal unknowns (default "u")

    returns: sympy expression interpolating symbols var0..varN at the points
    """
    num_pts = len(pts)
    # One symbolic unknown per stencil point: u0, u1, ...
    unknowns = symbols("{}:{}".format(var, num_pts))
    # Generalized Vandermonde matrix: each basis term evaluated at each point
    vander = Matrix(num_pts, num_pts,
                    lambda row, col: terms[col].subs({x: pts[row][0],
                                                      y: pts[row][1]}))
    inverse = simplify(vander.inv())
    # Shape functions follow from the transposed inverse times the basis
    shape_funs = simplify(inverse.T * Matrix(terms))
    return sum(Matrix(unknowns).T * shape_funs)
"""
Explanation: Finite differences for linear elasticity
Definitions
End of explanation
"""
pts = [[0, 0],
[h, 0],
[0, h],
[-h, 0],
[0, -h],
[h, h],
[-h, h],
[-h, -h],
[h, -h]]
terms = [S(1), x, y, x**2, x*y, y**2, x**2*y, x*y**2, x**2*y**2]
"""
Explanation: Nine-point stencil
We can compute the finite difference for elasticity by applying
the Navier-Cauchy operator to a polynomial interpolator for
our stencil.
<center>
<img src="media/fd_elast_stencil.svg"
width="400"/>
</center>
End of explanation
"""
U = construct_poly(pts, terms, "u")
V = construct_poly(pts, terms, "v")
disp = Matrix([U, V, 0])
"""
Explanation: We can construct the interpolators for horizontal and
vertical components of the displacement vector.
End of explanation
"""
disp[0]
"""
Explanation: Let's take a look at one of the components
End of explanation
"""
simplify(navier_cauchy(disp, [lamda, mu]).subs({x:0, y:0}))
"""
Explanation: This expression is quite lengthy to manipulate by hand, but we
can obtain the finite difference for the Navier operator
straightforward.
End of explanation
"""
strain = (sym_grad(disp)).subs({x:0, y:0})
stress = strain_stress(strain, [lamda, mu])
"""
Explanation: To impose Neuman boundary conditions we need to compute the stresses.
End of explanation
"""
t1 = stress * Matrix([1, 0, 0])
simplify(2*h*t1)
t2 = stress * Matrix([0, 1, 0])
simplify(2*h*t2)
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS and return it wrapped for display.

    Uses a context manager so the file handle is closed promptly; the
    original opened the file without ever closing it (resource leak).
    """
    with open('./styles/custom_barba.css', 'r') as style_file:
        styles = style_file.read()
    return HTML(styles)
css_styling()
"""
Explanation: The tractions are the projection of the stress tensor. For a uniform grid
these would be horizontal or vertical.
End of explanation
"""
|
pytransitions/transitions | examples/Playground.ipynb | mit | from transitions import Machine
import random
class NarcolepticSuperhero(object):
    """Demo model for `transitions`: a superhero who may fall asleep anywhere.

    The Machine created in __init__ attaches trigger methods (wakeup,
    work_out, eat, distress_call, complete_mission, clean_up, nap) and a
    `state` attribute to each instance at runtime.
    """
    # Define some states. Most of the time, narcoleptic superheroes are just like
    # everyone else. Except for...
    states = ['asleep', 'hanging out', 'hungry', 'sweaty', 'saving the world']
    # A more compact version of the quickstart transitions
    transitions = [['wakeup', 'asleep', 'hanging out'],
                   ['work_out', 'hanging out', 'hungry'],
                   ['eat', 'hungry', 'hanging out'],
                   # '*' source: a distress call interrupts any state
                   {'trigger': 'distress_call', 'source': '*', 'dest': 'saving the world', 'before': 'change_into_super_secret_costume'},
                   {'trigger': 'complete_mission', 'source': 'saving the world', 'dest': 'sweaty', 'after': 'update_journal'},
                   # conditional clean_up: falls asleep only when exhausted...
                   {'trigger': 'clean_up', 'source': 'sweaty', 'dest': 'asleep', 'conditions': 'is_exhausted'},
                   # ...otherwise this fallback transition fires instead
                   ['clean_up', 'sweaty', 'hanging out'],
                   ['nap', '*', 'asleep']]
    def __init__(self, name):
        # No anonymous superheroes on my watch! Every narcoleptic superhero gets
        # a name. Any name at all. SleepyMan. SlumberGirl. You get the idea.
        self.name = name
        self.kittens_rescued = 0  # What have we accomplished today?
        # Initialize the state machine
        self.machine = Machine(model=self, states=NarcolepticSuperhero.states,
                               transitions=NarcolepticSuperhero.transitions, initial='asleep')
    def update_journal(self):
        """ Dear Diary, today I saved Mr. Whiskers. Again. """
        self.kittens_rescued += 1
    @property
    def is_exhausted(self):
        """ Basically a coin toss. """
        return random.random() < 0.5
    def change_into_super_secret_costume(self):
        """Callback run before every transition into 'saving the world'."""
        print("Beauty, eh?")
    def yell(self):
        """Announce name and current machine state (set by the Machine)."""
        print(f"I am {self.name} and I am {self.state}!")
batman = NarcolepticSuperhero("Batman")
batman.wakeup()
assert batman.state == 'hanging out'
batman.yell()
# the rest is up to you ...
"""
Explanation: Playground
Make sure to read the documentation first.
Rescue those kittens!
Too much coffee with my hierarchical state machines!
Very asynchronous dancing
Fun with graphs
Rescue those kittens!
End of explanation
"""
from transitions.extensions import HierarchicalMachine as Machine
states = ['standing', 'walking', {'name': 'caffeinated', 'children':['dithering', 'running']}]
transitions = [
['walk', 'standing', 'walking'],
['stop', 'walking', 'standing'],
['drink', '*', 'caffeinated'],
['walk', ['caffeinated', 'caffeinated_dithering'], 'caffeinated_running'],
['relax', 'caffeinated', 'standing']
]
machine = Machine(states=states, transitions=transitions, initial='standing', ignore_invalid_triggers=True)
assert machine.walk() # Walking now
# I fancy a coffee right now ...
"""
Explanation: Too much coffee with my hierarchical state machines!
End of explanation
"""
from transitions.extensions.asyncio import AsyncMachine
import asyncio
class Dancer:
    """Async demo model: two dancers stepping through states at different beats.

    The AsyncMachine attaches the `step` trigger (ordered transitions over
    `states`) and a `state` attribute to each instance at runtime.
    """
    states = ['start', 'left_food_left', 'left', 'right_food_right']
    def __init__(self, name, beat):
        self.my_name = name    # label used in printed output
        self.my_beat = beat    # seconds to wait after each step
        self.moves_done = 0    # counts completed cycles (returns to 'start')
    async def on_enter_start(self):
        # Called by the machine each time the dancer re-enters 'start',
        # i.e. once per full cycle through the ordered states.
        self.moves_done += 1
    async def wait(self):
        """after_state_change callback: report the step, then pause one beat."""
        print(f'{self.my_name} stepped {self.state}')
        await asyncio.sleep(self.my_beat)
    async def dance(self):
        """Keep stepping through the ordered states until 5 cycles are done."""
        while self.moves_done < 5:
            await self.step()
dancer1 = Dancer('Tick', 1)
dancer2 = Dancer('Tock', 1.1)
m = AsyncMachine(model=[dancer1, dancer2], states=Dancer.states, initial='start', after_state_change='wait')
m.add_ordered_transitions(trigger='step')
# it starts okay but becomes quite a mess
_ = await asyncio.gather(dancer1.dance(), dancer2.dance())
"""
Explanation: Very asynchronous dancing
End of explanation
"""
from transitions.extensions.states import Timeout, Tags, add_state_features
from transitions.extensions.diagrams import GraphMachine
import io
from IPython.display import Image, display, display_png
@add_state_features(Timeout, Tags)
class CustomMachine(GraphMachine):
    """GraphMachine whose states also support the Timeout and Tags features.

    The decorator mixes the feature classes into the machine's state class;
    no additional behavior is defined here.
    """
    pass
states = ['new', 'approved', 'ready', 'finished', 'provisioned',
{'name': 'failed', 'on_enter': 'notify', 'on_exit': 'reset',
'tags': ['error', 'urgent'], 'timeout': 10, 'on_timeout': 'shutdown'},
'in_iv', 'initializing', 'booting', 'os_ready', {'name': 'testing', 'on_exit': 'create_report'},
'provisioning']
transitions = [{'trigger': 'approve', 'source': ['new', 'testing'], 'dest':'approved',
'conditions': 'is_valid', 'unless': 'abort_triggered'},
['fail', '*', 'failed'],
['add_to_iv', ['approved', 'failed'], 'in_iv'],
['create', ['failed','in_iv'], 'initializing'],
['init', 'in_iv', 'initializing'],
['finish', 'approved', 'finished'],
['boot', ['booting', 'initializing'], 'booting'],
['ready', ['booting', 'initializing'], 'os_ready'],
['run_checks', ['failed', 'os_ready'], 'testing'],
['provision', ['os_ready', 'failed'], 'provisioning'],
['provisioning_done', 'provisioning', 'os_ready']]
class Model:
    """Model object for the CustomMachine diagram demo.

    `get_graph` is injected into the model by the GraphMachine; `is_valid`
    and `abort_triggered` serve as condition callbacks for transitions.
    """
    # graph object is created by the machine
    def show_graph(self, **kwargs):
        """Render the machine's graph to PNG in-memory and display it inline."""
        stream = io.BytesIO()
        # requires pygraphviz/graphviz with the 'dot' layout program
        self.get_graph(**kwargs).draw(stream, prog='dot', format='png')
        display(Image(stream.getvalue()))
    def is_valid(self):
        # condition for the 'approve' trigger — always allow in this demo
        return True
    def abort_triggered(self):
        # 'unless' condition for 'approve' — never aborted in this demo
        return False
model = Model()
machine = CustomMachine(model=model, states=states, transitions=transitions, initial='new', title='System State',
show_conditions=True, show_state_attributes=True)
model.approve()
model.show_graph()
# Your turn! What happens next?
"""
Explanation: Fun with graphs
This requires pygraphviz or graphviz
End of explanation
"""
|
AllenDowney/ModSimPy | notebooks/chap22.ipynb | mit | # Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
"""
Explanation: Modeling and Simulation in Python
Chapter 22
Copyright 2017 Allen Downey
License: Creative Commons Attribution 4.0 International
End of explanation
"""
m = UNITS.meter
s = UNITS.second
kg = UNITS.kilogram
"""
Explanation: Vectors
A Vector object represents a vector quantity. In the context of mechanics, vector quantities include position, velocity, acceleration, and force, all of which might be in 2D or 3D.
You can define a Vector object without units, but if it represents a physical quantity, you will often want to attach units to it.
I'll start by grabbing the units we'll need.
End of explanation
"""
A = Vector(3, 4) * m
"""
Explanation: Here's a two dimensional Vector in meters.
End of explanation
"""
A.x
A.y
"""
Explanation: We can access the elements by name.
End of explanation
"""
A.mag
"""
Explanation: The magnitude is the length of the vector.
End of explanation
"""
A.angle
"""
Explanation: The angle is the number of radians between the vector and the positive x axis.
End of explanation
"""
B = Vector(1, 2) * m
"""
Explanation: If we make another Vector with the same units,
End of explanation
"""
A + B
"""
Explanation: We can add Vector objects like this
End of explanation
"""
A - B
"""
Explanation: And subtract like this:
End of explanation
"""
A.dist(B)
"""
Explanation: We can compute the Euclidean distance between two Vectors.
End of explanation
"""
A.diff_angle(B)
"""
Explanation: And the difference in angle
End of explanation
"""
mag = A.mag
angle = A.angle
"""
Explanation: If we are given the magnitude and angle of a vector, what we have is the representation of the vector in polar coordinates.
End of explanation
"""
x, y = pol2cart(angle, mag)
Vector(x, y)
"""
Explanation: We can use pol2cart to convert from polar to Cartesian coordinates, and then use the Cartesian coordinates to make a Vector object.
In this example, the Vector we get should have the same components as A.
End of explanation
"""
A / A.mag
"""
Explanation: Another way to represent the direction of A is a unit vector, which is a vector with magnitude 1 that points in the same direction as A. You can compute a unit vector by dividing a vector by its magnitude:
End of explanation
"""
A.hat()
"""
Explanation: Or by using the hat function, so named because unit vectors are conventionally decorated with a hat, like this: $\hat{A}$:
End of explanation
"""
# Solution goes here
"""
Explanation: Exercise: Create a Vector named a_grav that represents acceleration due to gravity, with x component 0 and y component $-9.8$ meters / second$^2$.
End of explanation
"""
degree = UNITS.degree
radian = UNITS.radian
"""
Explanation: Degrees and radians
Pint provides units to represent degree and radians.
End of explanation
"""
angle = 45 * degree
angle
"""
Explanation: If you have an angle in degrees,
End of explanation
"""
angle_rad = angle.to(radian)
"""
Explanation: You can convert to radians.
End of explanation
"""
angle_rad.to(radian)
"""
Explanation: If it's already in radians, to does the right thing.
End of explanation
"""
angle_rad.to(degree)
"""
Explanation: You can also convert from radians to degrees.
End of explanation
"""
np.deg2rad(angle)
"""
Explanation: As an alternative, you can use np.deg2rad, which works with Pint quantities, but it also works with simple numbers and NumPy arrays:
End of explanation
"""
# Solution goes here
# Solution goes here
"""
Explanation: Exercise: Create a Vector named a_force that represents acceleration due to a force of 0.5 Newton applied to an object with mass 0.3 kilograms, in a direction 45 degrees up from the positive x-axis.
Add a_force to a_grav from the previous exercise. If that addition succeeds, that means that the units are compatible. Confirm that the total acceleration seems to make sense.
End of explanation
"""
t_end = 10 * s
dt = t_end / 100
params = Params(x = 0 * m,
y = 1 * m,
g = 9.8 * m/s**2,
mass = 145e-3 * kg,
diameter = 73e-3 * m,
rho = 1.2 * kg/m**3,
C_d = 0.33,
angle = 45 * degree,
velocity = 40 * m / s,
t_end=t_end, dt=dt)
"""
Explanation: Baseball
Here's a Params object that contains parameters for the flight of a baseball.
End of explanation
"""
def make_system(params):
    """Make a system object.

    params: Params object with angle, velocity, x, y,
            diameter, duration, g, mass, rho, and C_d

    returns: System object with initial state (position and velocity
             Vectors) and cross-sectional area computed from the diameter
    """
    angle, velocity = params.angle, params.velocity

    # convert angle to radians (was mislabeled "to degrees")
    theta = np.deg2rad(angle)

    # compute x and y components of velocity
    vx, vy = pol2cart(theta, velocity)

    # make the initial state: R is position, V is velocity
    R = Vector(params.x, params.y)
    V = Vector(vx, vy)
    init = State(R=R, V=V)

    # compute area from diameter
    diameter = params.diameter
    area = np.pi * (diameter/2)**2

    return System(params, init=init, area=area)
"""
Explanation: And here's the function that uses the Params object to make a System object.
End of explanation
"""
system = make_system(params)
"""
Explanation: Here's how we use it:
End of explanation
"""
def drag_force(V, system):
    """Compute the drag force acting opposite to the velocity `V`.

    V: velocity Vector
    system: System object with rho, C_d, area

    returns: Vector drag force
    """
    # drag magnitude: rho * |V|^2 * C_d * A / 2
    magnitude = system.rho * V.mag**2 * system.C_d * system.area / 2
    # unit vector opposite to the direction of motion; keep the Vector
    # on the left of `*` so the result stays a ModSimVector
    return -V.hat() * magnitude
"""
Explanation: Here's a function that computes drag force using vectors:
End of explanation
"""
V_test = Vector(10, 10) * m/s
drag_force(V_test, system)
"""
Explanation: We can test it like this.
End of explanation
"""
def slope_func(state, t, system):
    """Compute time derivatives of the state variables.

    state: State (R position Vector, V velocity Vector)
    t: time
    system: System object with g, rho, C_d, area, mass

    returns: tuple (velocity Vector, acceleration Vector)
    """
    R, V = state

    # acceleration contributed by drag (a = F / m)
    a_drag = drag_force(V, system) / system.mass

    # gravity points straight down
    a_grav = Vector(0, -system.g)

    return V, a_grav + a_drag
"""
Explanation: Here's the slope function that computes acceleration due to gravity and drag.
End of explanation
"""
slope_func(system.init, 0, system)
"""
Explanation: Always test the slope function with the initial conditions.
End of explanation
"""
def event_func(state, t, system):
    """Return the ball's height; the solver stops when this crosses zero.

    state: State object (position Vector, velocity Vector)
    t: time
    system: System object

    returns: y coordinate of the position Vector
    """
    position, _velocity = state
    return position.y
event_func(system.init, 0, system)
"""
Explanation: We can use an event function to stop the simulation when the ball hits the ground:
End of explanation
"""
results, details = run_ode_solver(system, slope_func, events=event_func)
details
"""
Explanation: Now we can call run_ode_solver
End of explanation
"""
flight_time = get_last_label(results) * s
"""
Explanation: The final label tells us the flight time.
End of explanation
"""
R_final = get_last_value(results.R)
x_dist = R_final.x
"""
Explanation: The final value of x tells us the how far the ball landed from home plate:
End of explanation
"""
xs = results.R.extract('x')
ys = results.R.extract('y')
xs.plot()
ys.plot()
decorate(xlabel='Time (s)',
ylabel='Position (m)')
savefig('figs/chap22-fig01.pdf')
"""
Explanation: Visualizing the results
The simplest way to visualize the results is to plot x and y as functions of time.
End of explanation
"""
vx = results.V.extract('x')
vy = results.V.extract('y')
vx.plot(label='vx')
vy.plot(label='vy')
decorate(xlabel='Time (s)',
ylabel='Velocity (m/s)')
"""
Explanation: We can plot the velocities the same way.
End of explanation
"""
def plot_trajectory(results):
    """Plot y versus x position to show the ball's path through its plane of motion."""
    x_coords = results.R.extract('x')
    y_coords = results.R.extract('y')
    plot(x_coords, y_coords, color='C2', label='trajectory')
    decorate(xlabel='x position (m)',
             ylabel='y position (m)')
plot_trajectory(results)
savefig('figs/chap22-fig02.pdf')
"""
Explanation: The x velocity slows down due to drag.
The y velocity drops quickly while drag and gravity are in the same direction, then more slowly after the ball starts to fall.
Another way to visualize the results is to plot y versus x. The result is the trajectory of the ball through its plane of motion.
End of explanation
"""
xs = results.R.extract('x')
ys = results.R.extract('y')
def draw_func(state, t):
    """Draw one animation frame: the ball's position at time t."""
    # fix the axis limits so matplotlib doesn't rescale every frame
    set_xlim(xs)
    set_ylim(ys)
    ball_x, ball_y = state.R
    plot(ball_x, ball_y, 'bo')
    decorate(xlabel='x position (m)',
             ylabel='y position (m)')
animate(results, draw_func)
"""
Explanation: Animation
One of the best ways to visualize the results of a physical model is animation. If there are problems with the model, animation can make them apparent.
The ModSimPy library provides animate, which takes as parameters a TimeSeries and a draw function.
The draw function should take as parameters a State object and the time. It should draw a single frame of the animation.
Inside the draw function, you almost always have to call set_xlim and set_ylim. Otherwise matplotlib auto-scales the axes, which is usually not what you want.
End of explanation
"""
V = Vector(3, 4)
type(V)
"""
Explanation: Exercise: Delete the lines that set the x and y axes (or comment them out) and see what the animation does.
Under the hood
Vector is a function that returns a ModSimVector object.
End of explanation
"""
isinstance(V, Quantity)
"""
Explanation: A ModSimVector is a specialized kind of Pint Quantity.
End of explanation
"""
V1 = V * m
type(V1)
"""
Explanation: There's one gotcha you might run into with Vectors and Quantities. If you multiply a ModSimVector and a Quantity, you get a ModSimVector:
End of explanation
"""
V2 = m * V
type(V2)
"""
Explanation: But if you multiply a Quantity and a Vector, you get a Quantity:
End of explanation
"""
V1.x, V1.y, V1.mag, V1.angle
"""
Explanation: With a ModSimVector you can get the coordinates using dot notation, as well as mag, mag2, and angle:
End of explanation
"""
V2[0], V2[1]
"""
Explanation: With a Quantity, you can't. But you can use indexing to get the coordinates:
End of explanation
"""
vector_mag(V2), vector_angle(V2)
"""
Explanation: And you can use vector functions to get the magnitude and angle.
End of explanation
"""
# Hint
system_no_drag = System(system, C_d=0)
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: And often you can avoid the whole issue by doing the multiplication with the ModSimVector on the left.
Exercises
Exercise: Run the simulation with and without air resistance. How wrong would we be if we ignored drag?
End of explanation
"""
# Hint
system2 = System(system, rho=1.0*kg/m**3)
# Solution goes here
# Solution goes here
"""
Explanation: Exercise: The baseball stadium in Denver, Colorado is 1,580 meters above sea level, where the density of air is about 1.0 kg / meter$^3$. How much farther would a ball hit with the same velocity and launch angle travel?
End of explanation
"""
baseball_drag = pd.read_csv('data/baseball_drag.csv')
mph = Quantity(baseball_drag['Velocity in mph'], UNITS.mph)
mps = mph.to(m/s)
baseball_drag.index = magnitude(mps)
baseball_drag.index.name = 'Velocity in meters per second'
baseball_drag
"""
Explanation: Exercise: The model so far is based on the assumption that coefficient of drag does not depend on velocity, but in reality it does. The following figure, from Adair, The Physics of Baseball, shows coefficient of drag as a function of velocity.
<img src="data/baseball_drag.png" width="400">
I used an online graph digitizer to extract the data and save it in a CSV file. Here's how we can read it:
End of explanation
"""
# Solution goes here
# Solution goes here
C_d = drag_interp(43 * m / s)
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Modify the model to include the dependence of C_d on velocity, and see how much it affects the results. Hint: use interpolate.
End of explanation
"""
|
aleph314/K2 | Data Preprocessing/Preprocessing_exercise.ipynb | gpl-3.0 | import pandas as pd
import numpy as np
"""
Explanation: Data Preprocessing
Imputation
You will often find yourself in a situation where you will be dealing with an incomplete dataset. There are many reasons why data may be missing: survey responses may have been optional, there may have been some sort of data recording error, or the information may simply just not be available. There are a plethora of ways to handle such situations, several of which we will explore in this exercise.
1 - Go do the Machine Learning Repository and download the Beijing PM2.5 Data, put it into a dataframe, giving the columns the proper names. Also be sure to familarize yourself with the data set before proceeding.
End of explanation
"""
pm2 = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv',
na_values='NA')
pm2.columns = ['id', 'year', 'month', 'day', 'hour', 'pm2', 'dew_point', 'temperature',
'pressure', 'wind_dir', 'wind_speed', 'hours_snow', 'hours_rain']
pm2.head()
pm2.info()
"""
Explanation: Attribute Information:
No: row number
year: year of data in this row
month: month of data in this row
day: day of data in this row
hour: hour of data in this row
pm2.5: PM2.5 concentration (ug/m^3)
DEWP: Dew Point (ƒ)
TEMP: Temperature (ƒ)
PRES: Pressure (hPa)
cbwd: Combined wind direction
Iws: Cumulated wind speed (m/s)
Is: Cumulated hours of snow
Ir: Cumulated hours of rain
End of explanation
"""
pm2.dropna(inplace=True)
pm2.describe().T
pm2.describe(include=['O'])
pm2.wind_dir.value_counts()
"""
Explanation: There are over 2000 samples with the PM2.5 value missing: since this is the value to predict I am going to drop them.
End of explanation
"""
# setting the seed
np.random.seed(0)
# creating an array of dimension equal to the number of cells of the dataframe and with exactly 5000 ones
dim = pm2.shape[0]*pm2.shape[1]
arr = np.array([0]*(dim-5000) + [1]*5000)
# shuffling and reshaping the array
np.random.shuffle(arr)
arr = arr.reshape(pm2.shape[0], pm2.shape[1])
# looping through all the values and setting the corresponding position in the dataframe to nan
it = np.nditer(arr, flags=['multi_index'])
while not it.finished:
if it[0] == 1:
pm2.iloc[it.multi_index[0], it.multi_index[1]] = np.nan
it.iternext()
# solution: inserted nans on all columns at random
data_na = pm2.copy()
nrow = data_na.shape[0]
for col in data_na:
rows = np.random.randint(0, nrow, 5000)
data_na[col].iloc[rows] = np.nan
pm2.info()
"""
Explanation: 2 - Suppose our data became corrupted after we downloaded it and values were missing. Randomly insert 5000 NaN into the dataset accross all the columns.
End of explanation
"""
# I'm dropping wind_dir and id
regr_cols = ['year', 'month', 'day', 'hour', 'dew_point', 'temperature',
'pressure', 'wind_speed', 'hours_snow', 'hours_rain', 'pm2']
pm2_regr = pm2.loc[:, regr_cols]
# in the solution there is no year, month, day and hour
# also, he discards hours_snow and hours_rain (though they aren't binary or categorical)
# from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
# X = pm2_regr.iloc[:, :-1]
# y = pm2_regr.iloc[:, -1]
# Xtrain, Xtest, ytrain, ytest = train_test_split(pm2_regr.iloc[:, :-1], pm2_regr.iloc[:, -1], test_size=0.2, random_state=0)
#just a note to self
pm2_regr1 = pm2_regr.dropna(thresh=7) # same as dropna without thresh
# thresh is the number of non nan columns required to mantain the rows
pm2_regr1 = pm2_regr.dropna(thresh=5)
pm2_regr1.info()
"""
Explanation: 3 - Which variables lend themselves to be in a regression model? Select those variables, and then fit a regression model for each of the following imputation strategies, commenting on your results.
- Dropping all rows with at least 1 NA
- Dropping all rows with at least 3 NA
- Imputing 0
- Mean
- Median
- Mode
End of explanation
"""
lr.fit(pm2_regr.dropna().iloc[:, :-1], pm2_regr.dropna().iloc[:, -1])
lr.score(pm2_regr.dropna().iloc[:, :-1], pm2_regr.dropna().iloc[:, -1])
"""
Explanation: Dropping all rows with at least 1 NA:
End of explanation
"""
lr.fit(pm2_regr.dropna(thresh=5).iloc[:, :-1], pm2_regr.dropna(thresh=5).iloc[:, -1])
lr.score(pm2_regr.dropna(thresh=5).iloc[:, :-1], pm2_regr.dropna(thresh=5).iloc[:, -1])
"""
Explanation: Dropping all rows with at least 3 NAs gets me an error because I have NaNs in some rows:
End of explanation
"""
lr.fit(pm2_regr.fillna(0).iloc[:, :-1], pm2_regr.fillna(0).iloc[:, -1])
lr.score(pm2_regr.fillna(0).iloc[:, :-1], pm2_regr.fillna(0).iloc[:, -1])
"""
Explanation: Imputing 0:
End of explanation
"""
imp = Imputer(strategy='mean')
pm2_regr_mean = imp.fit_transform(pm2_regr)
lr.fit(pm2_regr_mean[:, :-1], pm2_regr_mean[:, -1])
lr.score(pm2_regr_mean[:, :-1], pm2_regr_mean[:, -1])
"""
Explanation: Imputing the mean:
End of explanation
"""
imp = Imputer(strategy='median')
pm2_regr_median = imp.fit_transform(pm2_regr)
lr.fit(pm2_regr_median[:, :-1], pm2_regr_median[:, -1])
lr.score(pm2_regr_median[:, :-1], pm2_regr_median[:, -1])
"""
Explanation: The median:
End of explanation
"""
imp = Imputer(strategy='most_frequent')
pm2_regr_mode = imp.fit_transform(pm2_regr)
lr.fit(pm2_regr_mode[:, :-1], pm2_regr_mode[:, -1])
lr.score(pm2_regr_mode[:, :-1], pm2_regr_mode[:, -1])
"""
Explanation: And the mode:
End of explanation
"""
pm2_regr_imp = pm2_regr.dropna(subset=['year', 'month', 'day', 'hour', 'pm2'])
imp = Imputer(strategy = 'median')
pm2_regr_imp = imp.fit_transform(pm2_regr_imp)
lr.fit(pm2_regr_imp[:, :-1], pm2_regr_imp[:, -1])
lr.score(pm2_regr_imp[:, :-1], pm2_regr_imp[:, -1])
"""
Explanation: The best result I get is from simply dropping all rows with NAs; mean and median give similar performance, while the mode is the worst imputation (surprisingly worse than imputing 0, which is quite arbitrary).
Overall, none of the strategies yields good results — I guess the fit is poor in general.
4 - Given the results in part (3), and your own ingenuity, come up with a new imputation strategy and try it out. Comment on your results.
I'm going to drop rows with NAs for the columns year, month and hour, pm2; I'm imputing the median for all other columns:
End of explanation
"""
pm2.describe(include=['O'])
"""
Explanation: The result is slightly better than simply imputing mean or median, but still worse than dropping all NAs.
Categorical Data
Sometimes your data will contain categorical variables which need to be handled carefully depending on the machine learning algorithm you choose to use. Encoding categorical variables comes in two flavors: oridinal (ordered) and nominal (unordered) features. In this exercise, you'll further explore the Beijing PM2.5 dataset, this time using categorical variables.
1 - Which variables are categorical? Encode them properly, taking care to ensure that they are properly classified as either ordinal or nominal.
There is one categorical variable:
End of explanation
"""
# for simplicity I'm using pandas function
pm2_enc = pd.get_dummies(pm2)
pm2_enc = pm2_enc.loc[:, regr_cols[:-1] + ['wind_dir_NE', 'wind_dir_NW', 'wind_dir_SE', 'wind_dir_cv'] + regr_cols[-1:]].dropna()
# from solutions using sklearn:
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
l_enc = LabelEncoder()
oh_enc = OneHotEncoder(sparse=False)
# change categorical data labels to integers
data_sub = pm2.copy()
data_sub.wind_dir = l_enc.fit_transform(data_sub.wind_dir)
# one-hot encode
dummies = pd.DataFrame(oh_enc.fit_transform(data_sub.wind_dir.values.reshape(-1, 1)), columns=l_enc.classes_)
# join with original df
data_sub = data_sub.drop('wind_dir', axis=1)
data_sub = data_sub.join(dummies)
data_sub.head()
"""
Explanation: The variable is nominal, so I'm going to use one-hot encoding:
End of explanation
"""
lr.fit(pm2_enc.iloc[:, :-1], pm2_enc.iloc[:, -1])
lr.score(pm2_enc.iloc[:, :-1], pm2_enc.iloc[:, -1])
"""
Explanation: 2 - Perform a multilinear regression, using the classified data, removing the NA values. Comment on your results.
End of explanation
"""
# hours_snow and hours_rain are cumulative across days, so I'm taking the max for each day to see if it snowed
days = pm2_enc.groupby(['year', 'month', 'day'])['hours_snow', 'hours_rain'].max()
# creating columns for the encodings
days['snow'] = pd.Series(days['hours_snow'] > 0, dtype='int')
days['rain'] = pd.Series(days['hours_rain'] > 0, dtype='int')
days['rain_snow'] = pd.Series((days['hours_rain'] > 0) & (days['hours_snow'] > 0), dtype='int')
days['no_rain_snow'] = pd.Series((days['hours_rain'] == 0) & (days['hours_snow'] == 0), dtype='int')
# resetting index and dropping hours_snow and hours_rain
days.reset_index(inplace=True)
days.drop(['hours_snow', 'hours_rain'], inplace=True, axis=1)
# joining the dataframe with the new columns to the original one
pm2_enc = pm2_enc.merge(days, left_on=['year', 'month', 'day'], right_on=['year', 'month', 'day'])
pm2_enc.info()
lr.fit(pm2_enc.iloc[:, :-1], pm2_enc.iloc[:, -1])
lr.score(pm2_enc.iloc[:, :-1], pm2_enc.iloc[:, -1])
"""
Explanation: The results are a bit better than before, but the performances are still very bad.
3 - Create a new encoding for days in which it rained, snowed, neither, and both, and then rerun the regression. Are the results any better?
End of explanation
"""
# using pandas cut and subtracting 0.1 to include the min values
pm2_enc['wind_speed_quartile'] = pd.cut(pm2_enc.wind_speed,
bins=list(pm2_enc.wind_speed.quantile([0])-0.1) + list(pm2_enc.wind_speed.quantile([0.25, 0.5, 0.75, 1])),
labels=[0.25, 0.5, 0.75, 1])
# from solutions: using np.percentile:
quartile = np.percentile(data_sub['wind_speed_quartile'], [25, 50, 75, 100])
cat = []
for row in range(len(data_sub)):
wind_speed = data_sub['wind_speed_quartile'].iloc[row]
if wind_speed <= quartile[0]:
cat.append('1st')
if wind_speed <= quartile[1]:
cat.append('2nd')
if wind_speed <= quartile[2]:
cat.append('3rd')
if wind_speed <= quartile[3]:
cat.append('4th')
data_sub['wind_quart'] = cat
# and then create dummies...
# transforming the column in numeric
pm2_enc.wind_speed_quartile = pd.to_numeric(pm2_enc.wind_speed_quartile)
lr.fit(pm2_enc.iloc[:, :-1], pm2_enc.iloc[:, -1])
lr.score(pm2_enc.iloc[:, :-1], pm2_enc.iloc[:, -1])
"""
Explanation: Wow, now the fit is perfect!
4 - Create a new encoding for the quartile that a day falls under by wind speed and rerun the regression. Comment on your results.
End of explanation
"""
# using pandas cut and subtracting 0.1 to include the min values
pm2_enc['dew_point_decile'] = pd.cut(pm2_enc.dew_point,
bins=list(pm2_enc.dew_point.quantile([0])-0.1) + list(pm2_enc.dew_point.quantile([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])))
# from solutions: not using cut but creating new column and then:
data_sub.dew_dec = pd.Categorical(data_sub.dew_dec, categories=data_sub.dew_dec.unique(), ordered=True)
decile = pm2_enc.iloc[pm2_enc.temperature.argmax()].dew_point_decile
print(decile)
pm2_enc.loc[pm2_enc.dew_point_decile < decile]
"""
Explanation: The accuracy has gone down again after adding this new column, this may be due to the fact that this adds useless noise to the data or that this binning is too coarse maybe.
5 - Create a new encoding for deciles of the DEWP variable. Then select the row containing the highest temperature, and using Pandas category data type, select all rows in a lesser DEWP decile than this row.
End of explanation
"""
wine = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
wine.columns = ['class', 'alcohol', 'malic_acid', 'ash', 'alcalinity_ash', 'magnesium', 'total_phenols',
'flavanoids', 'nonflavanoid_phenols', 'proanthocyanins', 'color_intensity',
'hue', 'OD280_OD315', 'proline']
wine.head()
"""
Explanation: Feature Scaling
Many of the machine learning algorithms we have at our disposal require that the feautures be on the the same scale in order to work properly. In this exercise, you'll test out a few techniques with and without feature scaling and observe the outcomes.
1 - Head over to the Machine Learning Repository, download the Wine Dataset, and put it in a dataframe, being sure to label the columns properly.
End of explanation
"""
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
"""
Explanation: 2 - Fit a Nearest Neighbors model to the data, using a normalized data set, a stardardized data set, and the original. Split into test and train sets and compute the accuracy of the classifications and comment on your results.
End of explanation
"""
Xtrain, Xtest, ytrain, ytest = train_test_split(wine.iloc[:, 1:], wine.iloc[:, 0], test_size=0.3, random_state=0)
knn = KNeighborsClassifier()
knn.fit(Xtrain, ytrain)
print(knn.score(Xtrain, ytrain))
print(knn.score(Xtest, ytest))
"""
Explanation: Original dataset:
End of explanation
"""
mms = MinMaxScaler()
Xtrain_norm = mms.fit_transform(Xtrain)
Xtest_norm = mms.transform(Xtest)
knn.fit(Xtrain_norm, ytrain)
print(knn.score(Xtrain_norm, ytrain))
print(knn.score(Xtest_norm, ytest))
"""
Explanation: Normalized dataset:
End of explanation
"""
ssc = StandardScaler()
Xtrain_std = ssc.fit_transform(Xtrain)
Xtest_std = ssc.transform(Xtest)
knn.fit(Xtrain_std, ytrain)
print(knn.score(Xtrain_std, ytrain))
print(knn.score(Xtest_std, ytest))
"""
Explanation: Standardized dataset:
End of explanation
"""
from sklearn.naive_bayes import GaussianNB
"""
Explanation: The accuracy is way better for a normalized or standardized dataset, with the latter having slightly better generalization: K-Nearest Neighbors is sensitive to feature scaling.
3 - Fit a Naive Bayes model to the data, using a normalized data set, a standardized data set, and the original. Comment on your results.
End of explanation
"""
gnb = GaussianNB()
gnb.fit(Xtrain, ytrain)
print(gnb.score(Xtrain, ytrain))
print(gnb.score(Xtest, ytest))
"""
Explanation: Original dataset:
End of explanation
"""
gnb.fit(Xtrain_norm, ytrain)
print(gnb.score(Xtrain_norm, ytrain))
print(gnb.score(Xtest_norm, ytest))
"""
Explanation: Normalized dataset:
End of explanation
"""
gnb.fit(Xtrain_std, ytrain)
print(gnb.score(Xtrain_std, ytrain))
print(gnb.score(Xtest_std, ytest))
"""
Explanation: Standardized dataset:
End of explanation
"""
from sklearn.datasets import load_boston
boston = load_boston()
boston_df = pd.DataFrame(boston.data, columns=boston.feature_names)
boston_target = boston.target
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(boston_df, boston_target, test_size=0.3, random_state=0)
"""
Explanation: For this algorithm there is no difference at all, so scaling the data isn't necessary.
Feature Selection
With many datasets, you will find yourself in a situation where not all of the provided features are relevant to your model and it may be best to discard them. This is a very complex topic, involving many techniques, a few of which we will explore in this exercise, using the Boston housing data.
1 - From sklearn import the load_boston package, and put the data into a data frame with the proper column names, and then split into training and testing sets.
End of explanation
"""
from sklearn.feature_selection import f_classif, SelectKBest
from sklearn.metrics import mean_squared_error
# in the solutions he uses f_regression and not f_classif
# also, best features are obtained by cols[sel.get_support()] with cols = Xtrain.columns
# and lr is instantiated with normalize=True
from sklearn.feature_selection import f_regression
from sklearn.linear_model import LinearRegression
mse = []
cols = Xtrain.columns
lr = LinearRegression(normalize=True)
# looping through the number of features desired and storing the results in mse
for k in range(1, boston_df.shape[1]+1):
# using SelectKBest with the F-statistic as the score
sel = SelectKBest(score_func=f_regression, k=k)
# fitting the selector
sel.fit(Xtrain, ytrain)
# transforming train and test sets
Xtrain_k = sel.transform(Xtrain)
Xtest_k = sel.transform(Xtest)
# fitting linear regression model and printing out the k best features
lr.fit(Xtrain_k, ytrain)
print('Top {} features {}'.format(sel.k, cols[sel.get_support()]))
mse.append(mean_squared_error(lr.predict(Xtest_k), ytest))
mse
mse = []
# looping through the number of features desired and storing the results in mse
for k in range(1, boston_df.shape[1]+1):
# using SelectKBest with the F-statistic as the score
sel = SelectKBest(score_func=f_classif, k=k)
# fitting the selector
sel.fit(Xtrain, ytrain)
# transforming train and test sets
Xtrain_k = sel.transform(Xtrain)
Xtest_k = sel.transform(Xtest)
# fitting linear regression model and printing out the k best features
lr.fit(Xtrain_k, ytrain)
print('Top {} features {}'.format(k, pd.Series(sel.scores_, index=Xtrain.columns).\
sort_values(ascending=False).\
head(k).index.values))
mse.append(mean_squared_error(lr.predict(Xtest_k), ytest))
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(16, 8))
plt.plot(range(1, len(mse)+1), mse)
plt.title('MSE for models with different number of features')
plt.xlabel('Number of Features')
plt.ylabel('MSE');
"""
Explanation: 2 - Fit a series of least squares multilinear regression models to the data, and use the F-Statistic to select the K best features for values of k ranging from 1 to the total number of features. Plot the MSE for each model against the test set and print the best features for each iteration. Comment on your results.
End of explanation
"""
from sklearn.feature_selection import RFE
mse = []
# looping through the number of features desired and storing the results in mse
for k in range(1, boston_df.shape[1]+1):
# using Recursive Feature Selection with linear regression as estimator
sel = RFE(estimator=lr, n_features_to_select=k)
# fitting the selector
sel.fit(Xtrain, ytrain)
# transforming train and test sets
Xtrain_k = sel.transform(Xtrain)
Xtest_k = sel.transform(Xtest)
# fitting linear regression model and printing out the k best features
lr.fit(Xtrain_k, ytrain)
print('Top {} features {}'.format(k, pd.Series(sel.support_, index=Xtrain.columns).\
sort_values(ascending=False).\
head(k).index.values))
mse.append(mean_squared_error(lr.predict(Xtest_k), ytest))
plt.figure(figsize=(16, 8))
plt.plot(range(1, len(mse)+1), mse)
plt.title('MSE for models with different number of features')
plt.xlabel('Number of Features')
plt.ylabel('MSE');
"""
Explanation: The MSE keeps going down adding features, there is a great gain after the 11th feature is added.
3 - Do the same as in part (2) instead this time using recursive feature selection.
End of explanation
"""
# in solutions he doesn't use select from model but repeats the previous exercise only using ridge instead
# ok no, it does both
# in selectfrommodel it uses c_vals = np.arange(0.1, 2.1, 0.1) to loop through and the threshold is set to
# str(c) + '*mean' for c in c_vals
# also, he always fits the ridge model
from sklearn.linear_model import Ridge
from sklearn.feature_selection import SelectFromModel
# fitting ridge regression
ridge = Ridge()
c_vals = np.arange(0.1, 2.1, 0.1)
cols = Xtrain.columns
mse = []
# looping through the possible threshholds from above and storing the results in mse
for c in c_vals:
# using SelectFromModel with the ridge scores from above
selfrmod = SelectFromModel(ridge, threshold=str(c) + '*mean')
# fitting the selector
selfrmod.fit(Xtrain, ytrain)
# transforming train and test sets
Xtrain_k = selfrmod.transform(Xtrain)
Xtest_k = selfrmod.transform(Xtest)
# fitting linear regression model and printing out the k best features
ridge.fit(Xtrain_k, ytrain)
print('c={} features {}'.format(c, cols[selfrmod.get_support()]))
mse.append(mean_squared_error(ridge.predict(Xtest_k), ytest))
mse
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(16, 8))
plt.plot(c_vals, mse)
plt.title('MSE for different thresholds')
plt.xlabel('c')
plt.ylabel('MSE');
from sklearn.linear_model import Ridge
# from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
# fitting ridge regression
ridge = Ridge()
ridge.fit(Xtrain, ytrain)
# storing features importance
coef = ridge.coef_
mse = []
# looping through the possible threshholds from above and storing the results in mse
for k, thresh in enumerate(sorted(coef, reverse=True)):
# using SelectFromModel with the ridge scores from above
selfrmod = SelectFromModel(ridge, threshold=thresh)
# fitting the selector
selfrmod.fit(Xtrain, ytrain)
# transforming train and test sets
Xtrain_k = selfrmod.transform(Xtrain)
Xtest_k = selfrmod.transform(Xtest)
# fitting linear regression model and printing out the k best features
lr.fit(Xtrain_k, ytrain)
print('Top {} features {}'.format(k+1, pd.Series(ridge.coef_, index=Xtrain.columns).\
sort_values(ascending=False).\
head(k+1).index.values))
mse.append(mean_squared_error(lr.predict(Xtest_k), ytest))
plt.figure(figsize=(16, 8))
plt.plot(range(1, len(mse)+1), mse)
plt.title('MSE for models with different number of features')
plt.xlabel('Number of Features')
plt.ylabel('MSE');
"""
Explanation: The MSE keeps going down adding features but after the sixth feature is added there isn't much improvement.
4 - Fit a Ridge Regression model to the data and use recursive feature elimination and SelectFromModel in sklearn to select your features. Generate the same plots and best features as in parts (2) and (3) and comment and compare your results to what you have found previously.
End of explanation
"""
# again, in solutions he uses the c_vals as before and he fits the lasso
from sklearn.linear_model import LassoCV
from sklearn.feature_selection import SelectFromModel
# fitting ridge regression
lasso = LassoCV()
c_vals = np.arange(0.1, 2.1, 0.1)
cols = Xtrain.columns
mse = []
# looping through the possible threshholds from above and storing the results in mse
for c in c_vals:
# using SelectFromModel with the ridge scores from above
selfrmod = SelectFromModel(lasso, threshold=str(c) + '*mean')
# fitting the selector
selfrmod.fit(Xtrain, ytrain)
# transforming train and test sets
Xtrain_k = selfrmod.transform(Xtrain)
Xtest_k = selfrmod.transform(Xtest)
# fitting linear regression model and printing out the k best features
lasso.fit(Xtrain_k, ytrain)
print('c={} features {}'.format(c, cols[selfrmod.get_support()]))
mse.append(mean_squared_error(lasso.predict(Xtest_k), ytest))
mse
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(16, 8))
plt.plot(c_vals, mse)
plt.title('MSE for different thresholds')
plt.xlabel('c')
plt.ylabel('MSE');
from sklearn.linear_model import LassoCV
# fitting lasso regression
lasso = LassoCV()
lasso.fit(Xtrain, ytrain)
# storing features importance
coef = lasso.coef_
mse = []
# looping through the possible threshholds from above and storing the results in mse
for k, thresh in enumerate(sorted(coef, reverse=True)):
# using SelectFromModel with the lasso scores from above
selfrmod = SelectFromModel(lasso, threshold=thresh)
# fitting the selector
selfrmod.fit(Xtrain, ytrain)
# transforming train and test sets
Xtrain_k = selfrmod.transform(Xtrain)
Xtest_k = selfrmod.transform(Xtest)
# fitting linear regression model and printing out the k best features
lr.fit(Xtrain_k, ytrain)
print('Top {} features {}'.format(k+1, pd.Series(lasso.coef_, index=Xtrain.columns).\
sort_values(ascending=False).\
head(k+1).index.values))
mse.append(mean_squared_error(lr.predict(Xtest_k), ytest))
plt.figure(figsize=(16, 8))
plt.plot(range(1, len(mse)+1), mse)
plt.title('MSE for models with different number of features')
plt.xlabel('Number of Features')
plt.ylabel('MSE');
"""
Explanation: After the fourth feature there is no improvement.
Also, the MSE seems better than all the trials before.
5 - L1 regularization can also be used for model selection. Choose an algorithm in sklearn and repeat part (4) using model selection via regularization.
End of explanation
"""
|
Britefury/deep-learning-tutorial-pydata2016 | TUTORIAL 05 - Dogs vs cats with transfer learning.ipynb | mit | %matplotlib inline
"""
Explanation: Dogs vs Cats with Transfer Learning
In this Notebook we're going to use transfer learning to attempt to crack the Dogs vs Cats Kaggle competition.
We are going to downsample the images to 64x64; that's pretty small, but should be enough (I hope). Furthermore, large images means longer training time and I'm too impatient for that. ;)
<< Changes for transfer learning >>
The start and end of modifications to the code for transfer learning are marked in blocks with italic headings.
Lets have plots appear inline:
End of explanation
"""
import os, time, glob, tqdm
import numpy as np
from matplotlib import pyplot as plt
import torch, torch.nn as nn, torch.nn.functional as F
import torchvision
import skimage.transform, skimage.util
from skimage.util import montage
from sklearn.model_selection import StratifiedShuffleSplit
import cv2
from batchup import work_pool, data_source
import utils
import imagenet_classes
torch_device = torch.device('cuda:0')
"""
Explanation: We're going to need os, numpy, matplotlib, skimage, theano and lasagne. We also want to import some layer classes and utilities from Lasagne for convenience.
End of explanation
"""
TRAIN_PATH = r'E:\datasets\dogsvscats\train'
TEST_PATH = r'E:\datasets\dogsvscats\test1'
# Get the paths of the images
trainval_image_paths = glob.glob(os.path.join(TRAIN_PATH, '*.jpg'))
tests_image_paths = glob.glob(os.path.join(TEST_PATH, '*.jpg'))
"""
Explanation: Data loading
We are loading images from a folder of files, so we could approach this a number of ways.
Our dataset consists of 25,000 images so we could load them all into memory then access them from there. It would work, but it wouldn't scale. I'd prefer to demonstrate an approach that is more scalable and useful outside of this notebook, so we are going to load them on the fly.
Loading images on the fly poses a challenge as we may find that the GPU is waiting doing nothing while the CPU is loading images in order to build the next mini-batch to train with. It would therefore be desirable to load images in background threads so that mini-batches of images are ready to process when the GPU is able to take one. Luckily my batchup library can help here.
We must provide the logic for:
getting a list of paths where we can find the image files
given a list of indices identifying the images that are to make up this mini-batch, for each image in the mini-batch:
load each one
scale each one to the fixed size that we need
standardise each image (subtract mean, divide by standard deviation)
gather them in a mini-batch of shape (sample, channel, height, width)
Getting a list of paths where we can find the image files
Join the Kaggle competition and download the training and test data sets. Unzip them into a directory of your choosing, and modify the path definitions below to point to the appropriate location.
We split the images into training and validation later on, so we call them trainval for now.
End of explanation
"""
# The ground truth classifications are given by the filename having either a 'dog.' or 'cat.' prefix
# Use:
# 0: cat
# 1: dog
trainval_y = [(1 if os.path.basename(p).lower().startswith('dog.') else 0) for p in trainval_image_paths]
trainval_y = np.array(trainval_y).astype(np.int32)
"""
Explanation: Okay. We have our image paths. Now we need to create our ground truths. Luckily the filename of each file starts with either cat. or dog. indicating which it is. We will assign dogs a class of 1 and cats a class of 0.
End of explanation
"""
# We only want one split, with 10% of the data for validation
splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=12345)
# Get the training set and validation set sample indices
train_ndx, val_ndx = next(splitter.split(trainval_y, trainval_y))
print('{} training, {} validation'.format(len(train_ndx), len(val_ndx)))
"""
Explanation: Split into training and validation
We use Scikit-Learn StratifiedShuffleSplit for this.
End of explanation
"""
MODEL_MEAN = np.array([0.485, 0.456, 0.406])
MODEL_STD = np.array([0.229, 0.224, 0.225])

TARGET_SIZE = 64

def img_to_net(img):
    """
    Convert an image from image format -- shape (height, width, channel),
    range [0-1] -- to network format: shape (channel, height, width),
    standardised by MODEL_MEAN / MODEL_STD (ImageNet statistics).
    """
    # Standardise per-channel, then roll the channel axis to the front:
    # (H, W, C) -> (C, H, W)
    standardised = (img - MODEL_MEAN) / MODEL_STD
    chw = np.rollaxis(standardised, 2, 0)
    return chw.astype(np.float32)

def net_to_img(img):
    """
    Convert an image from network format -- shape (channel, height, width),
    standardised by MODEL_MEAN / MODEL_STD -- back to image format:
    shape (height, width, channel), range [0-1].
    """
    # (C, H, W) -> (H, W, C), then undo the standardisation
    hwc = np.moveaxis(img, 0, 2)
    restored = hwc * MODEL_STD + MODEL_MEAN
    return restored.astype(np.float32)
def load_image(path):
    """
    Load an image from a given path and convert to network format:
    a 3D float32 tensor of shape (channel, TARGET_SIZE, TARGET_SIZE).

    The image is resized (preserving aspect ratio) so that its longer
    side equals TARGET_SIZE, then padded symmetrically on the shorter
    side to reach a square TARGET_SIZE x TARGET_SIZE output.
    """
    # Read
    img = cv2.imread(path)
    # OpenCV loads images in BGR channel order; reverse to RGB
    img = img[:, :, ::-1]
    # Compute scaled dimensions, while preserving aspect ratio
    # py0, py1, px0, px1 are the padding required to get the image to `TARGET_SIZE` x `TARGET_SIZE`
    if img.shape[0] >= img.shape[1]:
        # Portrait (or square): height becomes TARGET_SIZE, pad width
        height = TARGET_SIZE
        width = int(img.shape[1] * float(TARGET_SIZE) / float(img.shape[0]) + 0.5)
        py0 = py1 = 0
        px0 = (TARGET_SIZE - width) // 2
        px1 = (TARGET_SIZE - width) - px0
    else:
        # Landscape: width becomes TARGET_SIZE, pad height
        width = TARGET_SIZE
        height = int(img.shape[0] * float(TARGET_SIZE) / float(img.shape[1]) + 0.5)
        px0 = px1 = 0
        py0 = (TARGET_SIZE - height) // 2
        py1 = (TARGET_SIZE - height) - py0
    # Resize the image using OpenCV resize
    # We use OpenCV as it is fast
    # We also resize *before* converting from uint8 type to float type as uint8 is significantly faster
    img = cv2.resize(img, (width, height))
    # Convert to float
    img = skimage.util.img_as_float(img)
    # Convert to network format
    img = img_to_net(img)
    # Apply padding to get it to a fixed size.
    # Note: constant 0 in standardised space corresponds to the
    # MODEL_MEAN colour once un-standardised by net_to_img.
    img = np.pad(img, [(0, 0), (py0, py1), (px0, px1)], mode='constant')
    return img
"""
Explanation: Define a function for loading a mini-batch of images
Given a list of indices into the train_image_paths list we must:
load each one
scale each one to the fixed size that we need
standardise each image (subtract mean, divide by standard deviation)
End of explanation
"""
plt.imshow(net_to_img(load_image(trainval_image_paths[0])))
plt.show()
"""
Explanation: Show an image to check our code so far:
End of explanation
"""
class ImageAccessor (object):
    """Array-like accessor that loads images from disk on demand.

    Implements `__len__` and `__getitem__` so that BatchUp data sources
    can draw mini-batches from it without loading every image up-front.
    """
    def __init__(self, paths):
        """
        Constructor

        paths - the list of paths of the images that we are to access
        """
        self.paths = paths

    def __len__(self):
        """
        The length of this array (number of images available)
        """
        return len(self.paths)

    def __getitem__(self, item):
        """
        Get images identified by item

        item can be:
        - an index as an integer (returns one image in network format)
        - an array/sequence of indices (returns a mini-batch of shape
          `(sample, channel, height, width)`)
        """
        # Accept NumPy integer scalars as well as plain Python ints
        if isinstance(item, (int, np.integer)):
            # item is an integer; get a single item
            path = self.paths[item]
            return load_image(path)
        elif isinstance(item, (np.ndarray, list, tuple)):
            # item is a sequence of indices
            # Get the paths of the images in the mini-batch
            paths = [self.paths[i] for i in item]
            # Load each image
            images = [load_image(path) for path in paths]
            # Stack in axis 0 to make an array of shape `(sample, channel, height, width)`
            return np.stack(images, axis=0)
        else:
            # Previously unsupported index types (e.g. slices, floats)
            # silently returned None; fail loudly instead.
            raise TypeError(
                'Unsupported index type: {}'.format(type(item).__name__))
"""
Explanation: Looks okay.
Make a BatchUp data source
BatchUp can extract mini-batches from data sources that have an array-like interface.
We must first define an image accessor that looks like an array. We do this by implementing __len__ and __getitem__ methods:
End of explanation
"""
# image accessor
trainval_X = ImageAccessor(trainval_image_paths)
train_ds = data_source.ArrayDataSource([trainval_X, trainval_y], indices=train_ndx)
val_ds = data_source.ArrayDataSource([trainval_X, trainval_y], indices=val_ndx)
"""
Explanation: Now we make ArrayDataSource instances for the training and validation sets. These provide methods for getting mini-batches that we will use for training.
End of explanation
"""
# A pool with 4 threads
pool = work_pool.WorkerThreadPool(4)
"""
Explanation: Process mini-batches in background threads
We want to do all the image loading in background threads so that the images are ready for the main thread that must feed the GPU with data to work on.
BatchUp provides worker pools for this purpose.
End of explanation
"""
train_ds = pool.parallel_data_source(train_ds)
val_ds = pool.parallel_data_source(val_ds)
"""
Explanation: Wrap our training and validation data sources so that they generate mini-batches in parallel background threads
End of explanation
"""
class XferPetClassifier (nn.Module):
    """Dogs-vs-cats classifier built by transfer learning from VGG-16.

    Keeps the pre-trained VGG-16 convolutional stack as the feature
    extractor and adds two new fully-connected layers, trained from
    scratch, that produce 2-way (cat/dog) logits.
    """
    def __init__(self, pretrained_vgg16):
        # pretrained_vgg16: a torchvision VGG-16 model; only its
        # convolutional `features` module is re-used here.
        super(XferPetClassifier, self).__init__()
        self.features = pretrained_vgg16.features
        # Size at this point will be 512 channels, 2x2
        # (presumably from 64x64 inputs shrunk by VGG's pooling stages
        # -- TODO confirm for other input sizes)
        self.fc6 = nn.Linear(512 * 2 * 2, 256)
        self.drop = nn.Dropout()
        self.fc7 = nn.Linear(256, 2)

    def forward(self, x):
        # Convolutional features, then flatten to (batch, 512*2*2)
        x = self.features(x)
        x = x.view(x.shape[0], -1)
        # New head: FC -> ReLU -> dropout -> 2-way logits (no softmax here)
        x = F.relu(self.fc6(x))
        x = self.drop(x)
        x = self.fc7(x)
        return x
# Build it
vgg16 = torchvision.models.vgg.vgg16(pretrained=True)
pet_net = XferPetClassifier(vgg16).to(torch_device)
"""
Explanation: << CHANGES START HERE >>
Build the network using the convolutional layers from VGG-16
Now we will define a class for the pet classifier network.
End of explanation
"""
loss_function = nn.CrossEntropyLoss()
# Get a list of all of the parameters
all_params = list(pet_net.parameters())
# Get a list of pre-trained parameters
pretrained_params = list(pet_net.features.parameters())
# Get their IDs and use to get a list of new parameters
pretrained_param_ids = set([id(p) for p in pretrained_params])
new_params = [p for p in all_params if id(p) not in pretrained_param_ids]
# Build optimizer with separate learning rates for pre-trained and new parameters
optimizer = torch.optim.Adam([dict(params=new_params, lr=1e-3),
dict(params=pretrained_params, lr=1e-4)])
"""
Explanation: Set up loss and optimizer
We separate the pre-trained parameters from the new parameters. We train the pre-trained parameters using a learning rate that is 10 times smaller.
End of explanation
"""
NUM_EPOCHS = 25
BATCH_SIZE = 128
"""
Explanation: << CHANGES END HERE >>
Train the network
Define settings for training; note we only need 25 epochs here:
End of explanation
"""
print('Training...')
for epoch_i in range(NUM_EPOCHS):
t1 = time.time()
# TRAIN
pet_net.train()
train_loss = 0.0
n_batches = 0
# Ask train_ds for batches of size `BATCH_SIZE` and shuffled in random order
for i, (batch_X, batch_y) in enumerate(train_ds.batch_iterator(batch_size=BATCH_SIZE, shuffle=True)):
t_x = torch.tensor(batch_X, dtype=torch.float, device=torch_device)
t_y = torch.tensor(batch_y, dtype=torch.long, device=torch_device)
# Clear gradients
optimizer.zero_grad()
# Predict logits
pred_logits = pet_net(t_x)
# Compute loss
loss = loss_function(pred_logits, t_y)
# Back-prop
loss.backward()
# Optimizer step
optimizer.step()
# Accumulate training loss
train_loss += float(loss)
n_batches += 1
# Divide by number of samples to get mean loss
train_loss /= float(n_batches)
# VALIDATE
pet_net.eval()
val_loss = val_err = 0.0
# For each batch:
with torch.no_grad():
for batch_X, batch_y in val_ds.batch_iterator(batch_size=BATCH_SIZE, shuffle=False):
t_x = torch.tensor(batch_X, dtype=torch.float, device=torch_device)
# Predict logits
pred_logits = pet_net(t_x).detach().cpu().numpy()
pred_cls = np.argmax(pred_logits, axis=1)
val_err += (batch_y != pred_cls).sum()
# Divide by number of samples to get mean loss and error
val_err /= float(len(val_ndx))
t2 = time.time()
# REPORT
print('Epoch {} took {:.2f}s: train loss={:.6f}; val err={:.2%}'.format(
epoch_i, t2 - t1, train_loss, val_err))
"""
Explanation: The training loop:
End of explanation
"""
# Number of samples to try
N_TEST = 15
# Shuffle test sample indices
rng = np.random.RandomState(12345)
test_ndx = rng.permutation(len(tests_image_paths))
# Select first `N_TEST` samples
test_ndx = test_ndx[:N_TEST]
for test_i in test_ndx:
# Load the image
X = load_image(tests_image_paths[test_i])
with torch.no_grad():
t_x = torch.tensor(X[None, ...], dtype=torch.float, device=torch_device)
# Predict class probabilities
pred_logits = pet_net(t_x)
pred_prob = F.softmax(pred_logits, dim=1).detach().cpu().numpy()
# Get predicted class
pred_y = np.argmax(pred_prob, axis=1)
# Get class name
pred_cls = 'dog' if pred_y[0] == 1 else 'cat'
# Report
print('Sample {}: predicted as {}, confidence {:.2%}'.format(test_i, pred_cls, pred_prob[0,pred_y[0]]))
# Show the image
plt.figure()
plt.imshow(net_to_img(X))
plt.show()
"""
Explanation: Apply to some example images from the test set
End of explanation
"""
|
shagunsodhani/PyDelhiConf2017 | notebook/Demo.ipynb | mit | def mul(a, b):
return a*b
mul(2, 3)
mul = lambda a, b: a*b
mul(2, 3)
"""
Explanation: Functional Programming in Python <center>
<p>
<p>
Shagun Sodhani
# Functions as first class citizens
End of explanation
"""
mul(mul(2, 3), 3)
def transform_and_add(func, a, b):
    """Apply *func* to both arguments and combine the results with ``+``."""
    transformed_a = func(a)
    transformed_b = func(b)
    return transformed_a + transformed_b
transform_and_add(lambda x: x**2, 1, 2)
"""
Explanation: Lambda is another way of defining a function
Higher Order Functions
End of explanation
"""
def square_and_add(a, b):
    """Sum of the squares of a and b."""
    return sum(value ** 2 for value in (a, b))

def cube_and_add(a, b):
    """Sum of the cubes of a and b."""
    return sum(value ** 3 for value in (a, b))

def quad_and_add(a, b):
    """Sum of the fourth powers of a and b."""
    return sum(value ** 4 for value in (a, b))
print(square_and_add(1, 2))
print(cube_and_add(1, 2))
print(quad_and_add(1, 2))
square = lambda x: x**2
cube = lambda x: x**3
quad = lambda x: x**4
print(square_and_add(1, 2) == transform_and_add(square, 1, 2))
print(cube_and_add(1, 2) == transform_and_add(cube, 1, 2))
print(quad_and_add(1, 2) == transform_and_add(quad, 1, 2))
def square_and_add(a, b):
    """Sum of the squares of a and b."""
    return sum(value ** 2 for value in (a, b))

def cube_and_mul(a, b):
    """Product of the cubes of a and b."""
    cubed_a = a ** 3
    cubed_b = b ** 3
    return cubed_a * cubed_b

def quad_and_div(a, b):
    """Quotient of the fourth powers of a and b."""
    quartic_a = a ** 4
    quartic_b = b ** 4
    return quartic_a / quartic_b
print(square_and_add(1, 2))
print(cube_and_mul(1, 2))
print(quad_and_div(1, 2))
def transform_and_reduce(func_transform, func_reduce, a, b):
    """Transform both operands with func_transform, then combine the
    two results with the binary function func_reduce."""
    transformed = (func_transform(a), func_transform(b))
    return func_reduce(*transformed)
print(square_and_add(1, 2) == transform_and_reduce(square, lambda x, y: x+y, 1, 2))
print(cube_and_mul(1, 2) == transform_and_reduce(cube, lambda x, y: x*y, 1, 2))
print(quad_and_div(1, 2) == transform_and_reduce(quad, lambda x, y: x/y, 1, 2))
"""
Explanation: Why would I want something like this?
A Familiar Pattern
End of explanation
"""
import operator
print(square_and_add(1, 2) == transform_and_reduce(square, operator.add, 1, 2))
print(cube_and_mul(1, 2) == transform_and_reduce(cube, operator.mul, 1, 2))
print(quad_and_div(1, 2) == transform_and_reduce(quad, operator.truediv, 1, 2))
"""
Explanation: Operators to the rescue
End of explanation
"""
print(square_and_add(1, 2) == transform_and_reduce(lambda x: x**2, lambda x, y: x+y, 1, 2))
print(cube_and_mul(1, 2) == transform_and_reduce(lambda x: x**3, lambda x, y: x*y, 1, 2))
print(quad_and_div(1, 2) == transform_and_reduce(lambda x: x**4, lambda x, y: x/y, 1, 2))
"""
Explanation: Lets do some maths
Number of transform functions = m
Number of reduce functions = n
Number of functions in the first workflow = m*n
Number of functions in the second workflow = m + n
Write small, re-useable function
End of explanation
"""
from time import time
def timer(func):
    """Decorator that reports the wall-clock time of each call to *func*.

    Fixes:
    - the wrapped function's return value was previously discarded;
      it is now returned to the caller.
    - `functools.wraps` preserves the wrapped function's name/docstring.
    - message typo ("Time take") corrected.
    """
    from functools import wraps  # local import keeps the notebook cell self-contained

    @wraps(func)
    def inner(*args, **kwargs):
        t = time()
        result = func(*args, **kwargs)
        print("Time taken = {time}".format(time=time() - t))
        return result
    return inner
def echo_func(input):
    """Print the given value; demo payload for a timing decorator."""
    print(input)
timed_echo = timer(echo_func)
timed_echo(1000000)
"""
Explanation: Function returns Function
End of explanation
"""
def logger(level, message):
    """Print *message* prefixed with its severity *level*."""
    print("{level}: {message}".format(level=level, message=message))

def debug(message):
    """Log *message* at debug level."""
    return logger("debug", message)

def info(message):
    """Log *message* at info level.

    Bug fix: the parameter was misspelled ``messgae`` while the body
    referenced ``message``, so the function read a stray global (or
    raised NameError) instead of its argument.
    """
    return logger("info", message)
debug("Error 404")
from functools import partial
debug = partial(logger, "debug")
info = partial(logger, "info")
debug("Error 404")
"""
Explanation: Partial Functions
End of explanation
"""
partial(logger, "debug")("Error 404")
"""
Explanation: debug("Error 404") = partial(logger, "debug")("Error 404")
End of explanation
"""
def transform_and_add(func_transform, a, b):
    """Apply func_transform to both operands and add the results."""
    results = [func_transform(value) for value in (a, b)]
    return results[0] + results[1]

def curry_transform_and_add(func_transform):
    """Curried form of transform_and_add: fix the transform first,
    supply the two operands later."""
    def apply(a, b):
        return func_transform(a) + func_transform(b)
    return apply
print(transform_and_add(cube, 1, 2) == curry_transform_and_add(cube)(1, 2))
"""
Explanation: Currying
f(a, b, c) => g(a)(b)(c)
End of explanation
"""
input_list = [1, 2, 3, 4]
squared_list = map(lambda x: x**2, input_list)
print(type(squared_list))
print(next(squared_list))
print(next(squared_list))
from functools import reduce
sum_list = reduce(operator.add, input_list)
print(sum_list)
sum_squared_list = reduce(operator.add,
map(lambda x: x**2, input_list))
print(sum_squared_list)
even_list = list(
filter(lambda x: x%2==0, input_list))
sum_even_list = reduce(operator.add, even_list)
print(sum_even_list)
print(reduce(operator.add,
(map(lambda x: x**2,
filter(lambda x: x%2==0, input_list)))))
"""
Explanation: Currying gets you specialized functions from more general functions
Map, Reduce, Filter
An alternate view to iteration
End of explanation
"""
from itertools import accumulate
acc = accumulate(input_list, operator.add)
print(input_list)
print(type(acc))
print(next(acc))
print(next(acc))
print(next(acc))
"""
Explanation: Benefits
Functional
One-liner
Elemental operations
itertools — Functions creating iterators for efficient looping
End of explanation
"""
def factorial(n):
    """Return n! computed recursively.

    Raises ValueError for negative input; previously a negative
    argument recursed without bound until RecursionError.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    if n == 0:
        return 1
    return n * factorial(n - 1)
"""
Explanation: Recursion
End of explanation
"""
print(input_list)
collection = list()
is_even = lambda x: x%2==0
for data in input_list:
if(is_even(data)):
collection.append(data)
else:
collection.append(data*2)
print(collection)
collection = [data if is_even(data) else data*2
for data in input_list]
print(collection)
"""
Explanation: Sadly, no tail recursion
Comprehension
End of explanation
"""
collection = (data if is_even(data) else data*2
for data in input_list)
print(collection)
"""
Explanation: Generators
End of explanation
"""
def pipeline_each(data, fns):
    """Lazily apply each function in *fns*, in order, to every element
    of *data*; returns a (lazy) map iterator."""
    result = data
    for fn in fns:
        result = map(fn, result)
    return result
import re
strings_to_clean = ["apple https://www.apple.com/",
"google https://www.google.com/",
"facebook https://www.facebook.com/"]
def format_string(input_string):
    """Remove any http(s) URLs, trim whitespace, then title-case."""
    without_urls = re.sub(r"http\S+", "", input_string)
    return without_urls.strip().title()
for _str in map(format_string, strings_to_clean):
print(_str)
"""
Explanation: Pipelines
Sequence of Operations
End of explanation
"""
import re
def remove_url(input_string):
    """Delete any http(s) URLs and strip surrounding whitespace."""
    stripped = re.sub(r"http\S+", "", input_string)
    return stripped.strip()

def title_case(input_string):
    """Return the string converted to title case."""
    return input_string.title()

def format_string(input_string):
    """Compose the cleaning steps: remove URLs, then title-case."""
    return title_case(remove_url(input_string))
"""
Explanation: No Modularity
End of explanation
"""
import re
for _str in pipeline_each(strings_to_clean, [remove_url,
title_case]):
print(_str)
"""
Explanation: f(g(h(i(...x))))
Modular but Ugly
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion | notebooks/recommendation_systems/solutions/2_als_bqml.ipynb | apache-2.0 | PROJECT = !(gcloud config get-value core/project)
PROJECT = PROJECT[0]
%env PROJECT=$PROJECT
%%bash
rm -r bqml_data
mkdir bqml_data
cd bqml_data
curl -O 'http://files.grouplens.org/datasets/movielens/ml-20m.zip'
unzip ml-20m.zip
yes | bq rm -r $PROJECT:movielens
bq --location=US mk --dataset \
--description 'Movie Recommendations' \
$PROJECT:movielens
bq --location=US load --source_format=CSV \
--autodetect movielens.ratings ml-20m/ratings.csv
bq --location=US load --source_format=CSV \
--autodetect movielens.movies_raw ml-20m/movies.csv
"""
Explanation: Collaborative filtering on the MovieLense Dataset
Learning Objectives
Know how to explore the data using BigQuery
Know how to use the model to make recommendations for a user
Know how to use the model to recommend an item to a group of users
This notebook is based on part of Chapter 9 of BigQuery: The Definitive Guide by Lakshmanan and Tigani.
MovieLens dataset
To illustrate recommender systems in action, let’s use the MovieLens dataset. This is a dataset of movie reviews released by GroupLens, a research lab in the Department of Computer Science and Engineering at the University of Minnesota, through funding by the US National Science Foundation.
Download the data and load it as a BigQuery table using:
End of explanation
"""
%%bigquery --project $PROJECT
SELECT *
FROM movielens.ratings
LIMIT 10
"""
Explanation: Exploring the data
Two tables should now be available in <a href="https://console.cloud.google.com/bigquery">BigQuery</a>.
Collaborative filtering provides a way to generate product recommendations for users, or user targeting for products. The starting point is a table, <b>movielens.ratings</b>, with three columns: a user id, an item id, and the rating that the user gave the product. This table can be sparse -- users don’t have to rate all products. Then, based on just the ratings, the technique finds similar users and similar products and determines the rating that a user would give an unseen product. Then, we can recommend the products with the highest predicted ratings to users, or target products at users with the highest predicted ratings.
End of explanation
"""
%%bigquery --project $PROJECT
SELECT
COUNT(DISTINCT userId) numUsers,
COUNT(DISTINCT movieId) numMovies,
COUNT(*) totalRatings
FROM movielens.ratings
"""
Explanation: A quick exploratory query yields that the dataset consists of over 138 thousand users, nearly 27 thousand movies, and a little more than 20 million ratings, confirming that the data has been loaded successfully.
End of explanation
"""
%%bigquery --project $PROJECT
SELECT *
FROM movielens.movies_raw
WHERE movieId < 5
"""
Explanation: On examining the first few movies using the query following query, we can see that the genres column is a formatted string:
End of explanation
"""
%%bigquery --project $PROJECT
CREATE OR REPLACE TABLE movielens.movies AS
SELECT * REPLACE(SPLIT(genres, "|") AS genres)
FROM movielens.movies_raw
%%bigquery --project $PROJECT
SELECT *
FROM movielens.movies
WHERE movieId < 5
"""
Explanation: We can parse the genres into an array and rewrite the table as follows:
End of explanation
"""
%%bigquery --project $PROJECT
SELECT iteration, loss, duration_ms
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender`)
"""
Explanation: Matrix factorization
Matrix factorization is a collaborative filtering technique that relies on factorizing the ratings matrix into two vectors called the user factors and the item factors. The user factors is a low-dimensional representation of a user_id and the item factors similarly represents an item_id.
Note: MF model training requires BQ flat rate contract. So here we will retrieve pre-trained model from external project.
If you activated flat rate pricing in BQ, you can train MF model with this Query.
```SQL
CREATE OR REPLACE MODEL movielens.recommender
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId', rating_col='rating')
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
```
End of explanation
"""
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender`, (
SELECT
movieId, title, 903 AS userId
FROM movielens.movies, UNNEST(genres) g
WHERE g = 'Comedy'
))
ORDER BY predicted_rating DESC
LIMIT 5
"""
Explanation: Making recommendations
With the trained model, we can now provide recommendations. For example, let’s find the best comedy movies to recommend to the user whose userId is 903. In the query below, we are calling ML.PREDICT passing in the trained recommendation model and providing a set of movieId and userId to carry out the predictions on. In this case, it’s just one userId (903), but all movies whose genre includes Comedy.
End of explanation
"""
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender`, (
WITH seen AS (
SELECT ARRAY_AGG(movieId) AS movies
FROM movielens.ratings
WHERE userId = 903
)
SELECT
movieId, title, 903 AS userId
FROM movielens.movies, UNNEST(genres) g, seen
WHERE g = 'Comedy' AND movieId NOT IN UNNEST(seen.movies)
))
ORDER BY predicted_rating DESC
LIMIT 5
"""
Explanation: Filtering out already rated movies
Of course, this includes movies the user has already seen and rated in the past. Let’s remove them.
TODO 1: Make a prediction for user 903 that does not include already seen movies.
End of explanation
"""
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender`, (
WITH allUsers AS (
SELECT DISTINCT userId
FROM movielens.ratings
)
SELECT
96481 AS movieId,
(SELECT title FROM movielens.movies WHERE movieId=96481) title,
userId
FROM
allUsers
))
ORDER BY predicted_rating DESC
LIMIT 5
"""
Explanation: For this user, this happens to yield the same set of movies -- the top predicted ratings didn’t include any of the movies the user has already seen.
Customer targeting
In the previous section, we looked at how to identify the top-rated movies for a specific user. Sometimes, we have a product and have to find the customers who are likely to appreciate it. Suppose, for example, we wish to get more reviews for movieId = 96481 (American Mullet) which has only one rating and we wish to send coupons to the 5 users who are likely to rate it the highest.
TODO 2: Find the top five users who will likely enjoy American Mullet (2001)
End of explanation
"""
%%bigquery --project $PROJECT
SELECT *
FROM ML.RECOMMEND(MODEL `cloud-training-demos.movielens.recommender`)
LIMIT 10
"""
Explanation: Batch predictions for all users and movies
What if we wish to carry out predictions for every user and movie combination? Instead of having to pull distinct users and movies as in the previous query, a convenience function is provided to carry out batch predictions for all movieId and userId encountered during training. A limit is applied here, otherwise, all user-movie predictions will be returned and will crash the notebook.
End of explanation
"""
|
cuemacro/finmarketpy | finmarketpy_examples/finmarketpy_notebooks/market_data_example.ipynb | apache-2.0 | import datetime
from chartpy import Chart, Style
from findatapy.market import Market, MarketDataGenerator, MarketDataRequest
# So we don't see deprecated warnings... when you're coding it's usually good to leave these!
import warnings
warnings.filterwarnings('ignore')
# Disable logging messages, to make output tidier
import logging
import sys
logging.disable(sys.maxsize)
"""
Explanation: Downloading market data examples
Saeed Amen - saeed@cuemacro.com
Getting market data using findatapy for a number of different data sources. findatapy is used extensively in finmarketpy. The idea is that we use a common API for all these different data sources, which makes it easier to combine them together in our analysis. Here we'll show some examples of downloading data from quandl and bloomberg.
findatapy also has ticker mapping functionality that allows you to use your own nicknames for tickers, rather than the vendor tickers. There are lots of in built ticker mappings already (defined in CSV files which you can edit in the config folder). Later we show, how to download our predefined ticker mappings and also how to query them, to extract the original vendor tickers. Most of the predefined tickers involve FX markets, given the focus of Cuemacro.
Let's first do some imports for objects we'll use later from chartpy, findatapy and finmarketpy. We'll also disable the log output.
End of explanation
"""
chart = Chart(engine='matplotlib')
market = Market(market_data_generator=MarketDataGenerator())
"""
Explanation: Set the engine to output the chart. One of the engines we can use is matplotlib and plotly is also supported. Create the Market object for fetching data in conjunction with MarketDataRequest objects.
End of explanation
"""
try:
import os
QUANDL_API_KEY = os.environ['QUANDL_API_KEY']
except:
QUANDL_API_KEY = 'TYPE_YOUR_KEY_HERE'
# Monthly average of UK resident monetary financial institutions' (excl. Central Bank) sterling
# Weighted average interest rate, other loans, new advances, on a fixed rate to private non-financial corporations (in percent)
# not seasonally adjusted
md_request = MarketDataRequest(
start_date="01 Jan 2005", # start date
data_source='quandl', # use Quandl as data source
tickers=['Weighted interest rate'],
fields=['close'], # which fields to download
vendor_tickers=['BOE/CFMBJ84'], # ticker (Bloomberg)
vendor_fields=['close'], # which Bloomberg fields to download
cache_algo='internet_load_return',
quandl_api_key=QUANDL_API_KEY) # how to return data
"""
Explanation: What is the MarketDataRequest?
MarketDataRequest is used to request market data from a data vendor/external source. Below we mention some of the properties.
freq represents the frequency of the data we want to download. Note that not every data vendor will provide high frequency data for every ticker.
daily - daily data (default)
tick - tick data (eg. dukascopy, fxcm, bloomberg, eikon)
intraday - minute data (eg. bloomberg, eikon)
findatapy supports many different data_source from a number of different vendors (both traditional market data and cryptocurrencies). Here we write a selection. We are continually adding more sources. If you'd like to sponsor the addition of a new data source let us know!
bloomberg - Bloomberg terminal/blpapi (paid)
eikon - Refinitiv Eikon (paid)
quandl - Quandl has a mix of free and premium data
dukascopy - retail FX tick data (free)
fxcm - retail FX tick data (free)
alfred - ALFRED/FRED mostly economic data (free)
yahoo - equities data (free)
bitcoincharts, poloniex, binance, bitfinex, gdax, kraken, bitmex, alphavantage, huobi - crypto data
Or we can give MarketDataRequest a csv or parquet file as a source
tickers: (can be list) eg. EURUSD - our nickname for tickers to use internally in findatapy
vendor_tickers: (can be list) eg. EURUSD Curncy - the particular vendor ticker to use for that same asset
fields: (can be list) eg. close - our nickname for the field
vendor_fields: (can be list) eg. PX_LAST - the particular vendor field
cache_algo (eg. internet, memory) eg. internet_load_return - internet will forcibly download from the internet
quandl_api_key: insert API key from Quandl (freely available from their website)
Downloading from Quandl example
In the below example, we download the monthly average interest rate for UK resident monetary financial institutions, using Quandl.
End of explanation
"""
df = market.fetch_market(md_request)
"""
Explanation: We've defined the MarketDataRequest and now we can download it.
End of explanation
"""
style = Style()
style.title = 'BoE weighted interest rate'
style.scale_factor = 3
style.file_output = "boe-rate.png"
style.source = 'Quandl/BoE'
"""
Explanation: Set style for graph, axis labels and plot. Creates an object with essentially the same characteristics as a matplotlib graph.
End of explanation
"""
chart.plot(df, style=style)
"""
Explanation: Now plot it!
End of explanation
"""
df = market.fetch_market("fx.bloomberg.daily.NYC.EURUSD.close")
print(df)
"""
Explanation: Using strings to make data requests
We noted that we have predefined tickers already in our ticker mapping framework. We also showed how to use MarketDataRequest to download market data, whether it is predefined or not. Here we are downloading EURUSD data from Bloomberg with NY close, but rather than using a MarketDataRequest we just use a single string, which obviously has a lot less boilerplate code. By default it will select one week. Note for this section to work, you will need to run this on a machine with blpapi installed, and a Bloomberg Terminal subscription on there.
End of explanation
"""
df = market.fetch_market("fx.bloomberg.intraday.NYC.EURUSD.close", start_date='week')
print(df)
"""
Explanation: Let's download intraday EURUSD data in the same way, which is a predefined ticker (underneath this will map to vendor_tickers for Bloomberg EURUSD CMPN Curncy.
End of explanation
"""
df = market.fetch_market("raw.data_source.bloomberg.tickers.VIX.vendor_tickers.VIX Index", start_date='week')
print(df)
"""
Explanation: We can download arbitary tickers, which are not predefined too in our CSV files, using the raw keyword, and select whatever tickers we want. We haven't specified all the parameters like freq or fields, in which case the defaults we use. Here it would be daily and close on Bloomberg which corresponds to PX_LAST.
End of explanation
"""
from findatapy.util import ConfigManager
cm = ConfigManager().get_instance()
# Get all the categories for raw data (note this won't include generated categories like fx-vol-market,
# which aggregate from many other categories)
categories = list(cm.get_categories_from_tickers())
# Filter those categories which include quandl
quandl_category = [x for x in categories if 'quandl' in x]
print(quandl_category)
# Filter those categories which include bloomberg and print the first few
bloomberg_category = [x for x in categories if 'bloomberg' in x][0:10]
print(bloomberg_category)
"""
Explanation: Querying for predefined tickers
We can also download all the tickers in our predefined tickers list (which are defined in our CSV files), which has this format, and will likely contain many tickers.
category.data_source.freq.cut
We can also specify individual predefined tickers and fields as follows:
category.data_source.freq.cut.tickers.fields
Let's get all the predefined tickers/categories, which are from quandl and a few from bloomberg.
End of explanation
"""
import os

# Read the Quandl API key from the environment, falling back to a
# placeholder the user must replace. os.environ.get with a default
# replaces the old bare try/except, which silently swallowed every
# exception type (not just a missing variable).
QUANDL_API_KEY = os.environ.get('QUANDL_API_KEY', 'TYPE_YOUR_KEY_HERE')

# Download every predefined ticker in the fx.quandl.daily.NYC category
# for the last year, passing the API key through a MarketDataRequest.
df = market.fetch_market(md_request_str="fx.quandl.daily.NYC", start_date='year',
                         md_request=MarketDataRequest(quandl_api_key=QUANDL_API_KEY))
print(df.head(5))
"""
Explanation: Let's download all the tickers in the category/data_source/freq/cut for fx.quandl.daily.NYC.
End of explanation
"""
# For this category, get all the tickers, vendor_tickers and fields which are available
# NOTE(review): tickers/fields query categories[0] while vendor_tickers uses the
# hard-coded "fx.quandl.daily.NYC" and the print reports quandl_category[0] —
# confirm these are all meant to refer to the same category.
tickers = cm.get_tickers_list_for_category_str(categories[0])
fields = cm.get_fields_list_for_category_str(categories[0])
vendor_tickers = cm.get_vendor_tickers_list_for_category_str("fx.quandl.daily.NYC")
# We don't need to add the environment (eg. backtest)
print("For category " + quandl_category[0])
print("tickers = " + str(tickers))
print("vendor_tickers = " + str(vendor_tickers))
print("fields = " + str(fields))
"""
Explanation: We can also query this category combination too for all available tickers/fields/vendor tickers.
End of explanation
"""
df = market.fetch_market(md_request_str="fx.quandl.daily.NYC.EURUSD", start_date='year',
md_request=MarketDataRequest(quandl_api_key=QUANDL_API_KEY))
print(df.head(5))
"""
Explanation: Let's download EURUSD from Quandl, using our predefined mapping.
End of explanation
"""
vendor_tickers = cm.convert_library_to_vendor_ticker_str("fx.quandl.daily.NYC.EURUSD")
print(vendor_tickers)
"""
Explanation: We can extract the vendor_tickers for our predefined ticker, not just for large categories.
End of explanation
"""
vendor_fields = cm.convert_library_to_vendor_field('bloomberg', 'close')
print(vendor_fields)
"""
Explanation: We can convert from our predefined fields to the vendor field. Here we show that our close field for bloomberg is PX_LAST on Bloomberg.
End of explanation
"""
|
abonaca/streakline | example/orbit.ipynb | mit | from __future__ import print_function, division
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.constants import G
import streakline
%matplotlib inline
mpl.rcParams['figure.figsize'] = (8,8)
mpl.rcParams['font.size'] = 18
"""
Explanation: Orbit in a point-mass potential
This example shows how to get the orbit of a tracer particle in a point-mass potential using the streakline module.
First we'll load the required modules and setup the notebook:
End of explanation
"""
# Point-mass potential: a 1 solar-mass star, with the tracer particle
# starting 1 AU away along the x-axis.
M = 1*u.Msun
x_ = np.array([1, 0, 0])*u.au
"""
Explanation: Next, let's define our potential by a $1\,\rm M_\odot$ point mass, and put our tracer particle initially at $1\,\rm AU$.
End of explanation
"""
# Circular-orbit speed at radius |x_|: v_c = sqrt(G M / r).
vc = np.sqrt(G*M/np.sqrt(np.sum(x_**2)))
vc.to(u.km/u.s)
# Point the velocity along y so the orbit is circular in the x-y plane.
v_ = np.array([0, vc.value, 0])*vc.unit
"""
Explanation: Let's also place our tracer on a circular orbit:
End of explanation
"""
potential_ = 'point'  # point-mass potential
integrator_ = 'lf'  # leapfrog integrator
age = 1*u.yr  # total integration time
dt_ = 1*u.day  # time step
sign = 1.  # +1 integrates forward in time, -1 backward
"""
Explanation: Now that the potential and the initial conditions are set, we need to define a few of the orbital integration parameters. First, we need to set the type of potential, next the integrator, then how long we want to integrate the orbit, with how big time steps, and in which direction (+1 for forward in time, -1 for back in time).
End of explanation
"""
def get_intid(integrator):
    """Look up the numeric ID the streakline extension uses for an integrator.

    Parameter:
    integrator - either 'lf' for leap frog or 'rk' for Runge-Kutta

    Raises a KeyError for any other integrator name.
    """
    # IDs mirror the enumeration hard-coded in the compiled extension.
    return {'lf': 0, 'rk': 1}[integrator]
def get_potid(potential):
    """Look up the numeric ID the streakline extension uses for a potential.

    Parameter:
    potential - one of the following:
        'point' -- point mass
        'log' -- triaxial logarithmic halo
        'nfw' -- triaxial NFW halo
        'gal' -- Hernquist bulge + Miyamoto-Nagai disk + triaxial NFW halo

    Raises a KeyError for any other potential name.
    """
    # Note ID 1 is unused here; IDs mirror the compiled extension's enumeration.
    ids = dict(point=0, log=2, nfw=3, gal=4)
    return ids[potential]
"""
Explanation: To speed up the calculations in the streakline module, integrator and potential variables are assigned an integer ids, which will be input for the orbit integrator. Here are a couple of helper functions that do the translation.
End of explanation
"""
# Strip the astropy units: the compiled streakline extension expects
# plain SI floats for positions, velocities and potential parameters.
x = x_.si.value
v = v_.si.value
params = [M.si.value,]
# Translate the human-readable choices into the extension's integer IDs.
potential = get_potid(potential_)
integrator = get_intid(integrator_)
# Number of steps needed to cover the full integration time at step dt_.
N = int(age/dt_)
dt = dt_.si.value
"""
Explanation: So far, we've made use of astropy units, which simplifies calculations in python. However, the streakline code is written in c for performance, and expects all inputs in SI units.
We'll convert the initial position and velocity, as well as the time step and all of the potential parameters to SI. Potential parameters are passed in a list, and for point mass potential it only consists of a point mass value. We get the potential and integrator IDs using the above helper functions, and calculate the number of time steps we want the integrator to make.
End of explanation
"""
# Integrate the orbit; the result is a 6 x N array of x, y, z, vx, vy, vz
# in SI units, one column per time step.
orbit_ = streakline.orbit(x, v, params, potential, integrator, N, dt, sign)
# Re-attach units: first three rows are positions (m), last three velocities (m/s).
orbit = {}
orbit['x'] = orbit_[:3]*u.m
orbit['v'] = orbit_[3:]*u.m/u.s
"""
Explanation: Now we have all the input parameters for the orbit integrator. It is called by streakline.orbit(x_init, v_init, potential_params, potential_id, integrator_id, Nsteps, time_step, sign). This function returns a $6\times\rm N_{step}$ array, with the orbital evolution of a tracer particle. The columns of the array are: $x$, $y$, $z$, $v_x$, $v_y$, $v_z$ (all in SI).
End of explanation
"""
# Plot the numerically integrated orbit (black) against the analytic
# solution, a circle of radius 1 AU (red).
plt.figure()
plt.plot(orbit['x'][0].to(u.au), orbit['x'][1].to(u.au), 'k-', lw=4, zorder=0)
circle = mpl.patches.Circle((0,0), radius=1, lw=2, ec='r', fc='none', zorder=1)
plt.gca().add_artist(circle)
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel("x (AU)");
plt.ylabel("y (AU)");
"""
Explanation: Let's check how well the integrator does by plotting the numerically integrated orbit (black) and the analytic solution (red).
End of explanation
"""
# Shrink the time step from 1 day to 1 hour and benchmark the integration.
dt_ = 1*u.hr
N = int(age/dt_)
dt = dt_.si.value
print('{} timesteps'.format(N))
# IPython magic: time the orbit call over 1000 runs per loop.
%timeit -n1000 orbit_ = streakline.orbit(x, v, params, potential, integrator, N, dt, sign)
"""
Explanation: Numerical orbit agrees with the analytic fairly well for this time step size. Explore what happens when you change it!
End of explanation
"""
|
maojrs/riemann_book | Acoustics_heterogeneous.ipynb | bsd-3-clause | %matplotlib inline
%config InlineBackend.figure_format = 'svg'
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import widgets, interact
from exact_solvers import acoustics_heterogeneous, acoustics_heterogeneous_demos
from utils import riemann_tools
import seaborn as sns
sns.set_style('white',{'legend.frameon':'True'});
"""
Explanation: Acoustics in heterogeneous media
End of explanation
"""
# Initial states [pressure, velocity]
ql = [25.0, 15.0]
qr = [10.0,-15.0]
# Acoustic eq. parameters [rho, bulk(K)]
paramsl = [1.0, 0.5]
paramsr = [5.0, 3.0]
acoustics_heterogeneous_demos.interactive_phase_plane(ql,qr,paramsl,paramsr)
"""
Explanation: We would like to model acoustic waves propagation through heterogeneous media, like non-homogeneous materials, layered media or any kind of interface, like walls. The materials in acoustic equations are modeled by the density and bulk modulus coefficients. The natural generalization of constant-coefficient acoustic equations is to have spatially dependent coefficients, so the equations take the form $q_t+A(x)q_x=0$. The explicit form is given by
\begin{align}
\left[ \begin{array}{c}
p \
u
\end{array} \right]_t
+ \left[ \begin{array}{cc}
0 & K(x) \
1/\rho(x) & 0 \
\end{array} \right]
\left[ \begin{array}{c}
p \
u \end{array} \right]_x = 0,
\end{align}
with $p$ and $u$ the pressure and velocity and $\rho(x)$ and $K(x)$ the spatially dependent density and bulk modulus of compressibility. Note this equation is in non-conservative form. An acoustics equation for heterogeneous media in conservative form can be derived in terms of the momentum and strain. However, that is a particular case of the elasticity equations, which are explored in detail in other sections of this book. Furthermore, it is convenient to use the pressure and velocity as variables since they are continuous at interfaces between materials, and they are also more physically intuitively. It is also important to recognize that more complicated systems emerging in applications might not be written in conservation form. Therefore, studying these problems might provide insight and algorithms on how to solve more complicated cases.
We proceed to do the usual analysis. The eigenvalues of the coefficient matrix $A$ are $\pm c(x)$, and the matrix of column eigenvalues is
\begin{align}
R(x) =
\left[ \begin{array}{ccccc}
-Z(x) & Z(x) \
1 & 1 \
\end{array} \right].
\end{align}
where
\begin{align}
c(x) &= \sqrt{\frac{K(x)}{\rho(x)}}, \
Z(x) &= \rho(x) c(x) = \sqrt{K(x)\rho(x)}
\end{align}
are the spatially dependent sound speed and impedance.
In order to solve these equations, we need to do a numerical discretization. Following the spirit of finite volume methods, we approximate $\rho(x)$ and $K(x)$ by piecewise constant functions that are constant in any given grid cell. Therefore, if we can solve the Riemann problem across two given cells, we can extrapolate the solution to the whole grid using standard finite volume method techniques, see <cite data-cite="fvmhp"><a href="riemann.html#fvmhp">(LeVeque 2002)</a></cite>. The Riemann problem to solve consists of the acoustic equations Riemann problem with discontinuous initial data and coefficients (discontinuous $\rho$ and $K$). Once this problem is solved it can be used to approximate a continuous density varying material or other similar examples. As the value of $\rho$ and $K$ is different on the left side than on the right side, the eigenvalues and eigenvectors are as follows. The eigenvalues will be given by the sound speed in each of the two mediums,
\begin{align}
s_l = -c_l \ \ \ \ \ s_r = c_r \ \ \ \ \ \mathrm{with:} \ \ \ \ \ c_i = \sqrt{\frac{K_{i}}{\rho_{i}}},
\label{eq:achetero}
\end{align}
and the eigenvalues by the impedances of each medium as well, so we can write the matrix of column eigenvectors $R=[r_1, r_2]$ as,
\begin{align}
R =
\left[ \begin{array}{ccccc}
-Z_{l} & Z_{r} \
1 & 1 \
\end{array} \right].
\end{align}
Once again, we only need to solve $\mathbf{R} \bar{\alpha} = \Delta \bar{q}$, which yields the values
of $\alpha$
\begin{align}
\alpha_1 = \frac{-\Delta p + Z_r\Delta u}{Z_l + Z_r}, \ \ \ \ \ \
\alpha_2 = \frac{\Delta p + Z_l\Delta u}{Z_l + Z_r}.
\end{align}
The middle state is again given simply by $q_m = q_\ell + \alpha_1 r_1 = q_r - \alpha_2 r_2$.
Interactive solution in the phase plane
This interactive plot allows you to change all of the parameters, as well as the left and right density and bulk modulus, so their influence on the phase-plane solution can be observed.
End of explanation
"""
ql = np.array([5,0])
qr = np.array([1,0])
rhol, rhor = 1.0, 20.0 # left and right density
bulkl, bulkr = 4.0, 15.0 # left and right bulk modulus
auxl = [rhol, bulkl]
auxr = [rhor, bulkr]
interact(acoustics_heterogeneous.riemann_plot_func(ql,qr,auxl, auxr), t=widgets.FloatSlider(value=0.0,min=0,max=1.0),
which_char=widgets.Dropdown(options=[None,1,2],description='Show characteristics'));
"""
Explanation: Examples
We will show some examples of where this Riemann problem becomes relevant. As in the previous case, we will begin by defining a function to do the interactive plotting for the different cases.
Problem 3: Shock tube with interface
We repeat the shock tube problem for acoustics but now with two materials. The material properties in the acoustic equations are defined by the density and bulk modulus. Therefore, we can solve the acoustics Riemann problem for two materials by simply choosing different densities and bulk moduli on the left and on the right. This Riemann problem is fundamental to model acoustic wave propagation across interfaces or heterogeneous materials. Note the symmetry of the wave speeds is lost since the eigenvalues are the sound speed and the sound speed depends on the material, i.e. on the density and bulk modulus, as shown in equation (\ref{eq:achetero}). Also note the characteristics bend when crossing the origin. This is a consequence of having different materials on the left and right sides since different materials will yield different sound speeds.
End of explanation
"""
acoustics_heterogeneous_demos.phase_plane_plot()(ql[0],ql[1],qr[0],qr[1],rhol,rhor,bulkl,bulkr,ymin=-2,ymax=2)
"""
Explanation: The solution in the phase plane is
End of explanation
"""
patm = 101325.0
ql = np.array([patm,340])
qr = np.array([patm,0])
rhol, rhor = 1.0, 8000.0 # left and right density
bulkl, bulkr = 100000.0, 160000000000.0 # left and right bulk modulus
auxl = [rhol, bulkl]
auxr = [rhor, bulkr]
interact(acoustics_heterogeneous.riemann_plot_func(ql,qr,auxl, auxr), t=widgets.FloatSlider(value=0.0,min=0,max=1.0),
which_char=widgets.Dropdown(options=[None,1,2],description='Show characteristics'));
"""
Explanation: Problem 4: Acoustic propagation through a wall
In a previous example, we showed the flow into a wall, which basically models the wall as a completely reflective surface. In most cases, this is a good approximation for the reflected waves; however, we could also ask what is the propagated acoustic wave through the wall. We can answer this question by using the air's bulk modulus and density in the right side and the wall's density and bulk modulus on the right. Air actually has density of $\rho \approx 1 kg/m^3$ and $K\approx 100 kPa$, steel on the other hand has $\rho\approx 8000 kg/m^3$ and $K=160 GPa$. Considering the atmospheric pressure to be $p_{atm} = 101325 Pa$, and an acoustic wave hitting the steel at $340 m/s$, we have all the parameters. As expected you will notice the acoustic wave
on the steel propagates extremely faster than in the air, which is around $5000 m/s$, around 14 times faster than in air.
End of explanation
"""
|
turbomanage/training-data-analyst | quests/rl/dqn/dqns_on_gcp.ipynb | apache-2.0 | %%bash
BUCKET=<your-bucket-here> # Change to your bucket name
JOB_NAME=dqn_on_gcp_$(date -u +%y%m%d_%H%M%S)
REGION='us-central1' # Change to your bucket region
IMAGE_URI=gcr.io/qwiklabs-resources/rl-qwikstart/dqn_on_gcp@sha256:326427527d07f30a0486ee05377d120cac1b9be8850b05f138fc9b53ac1dd2dc
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--region=$REGION \
--master-image-uri=$IMAGE_URI \
--scale-tier=BASIC_GPU \
--job-dir=gs://$BUCKET/$JOB_NAME \
--config=hyperparam.yaml
"""
Explanation: DQNs on GCP
Reinforcement Learning (RL) Agents can be quite fickle. This is because the environment for an Agent is different than that of Supervised and Unsupervised algorithms.
| Supervised / Unsupervised | Reinforcement Learning |
| ----------- | ----------- |
| Data is previously gathered | Data needs to be simulated |
| Big Data: Many examples covering many situations | Sparse Data: Agent trades off between exploring and exploiting |
| The environment is assumed static | The environment may change in response to the agent |
Because of this, hyperparameter tuning is even more crucial in RL as it not only impacts the training of the agent's neural network, but it also impacts how the data is gathered through simulation.
Setup
Hypertuning takes some time, and in this case, it can take anywhere between 10 - 30 minutes. If this hasn't been done already, run the cell below to kick off the training job now. We'll step through what the code is doing while our agents learn.
End of explanation
"""
!python3 -m pip freeze | grep gym || python3 -m pip install --user gym==0.12.5
!python3 -m pip freeze | grep 'tensorflow==2\|tensorflow-gpu==2' || \
python3 -m pip install --user tensorflow==2
"""
Explanation: The above command sends a hyperparameter tuning job to the Google Cloud AI Platform. It's a service that sets up scaling distributed training so data scientists and machine learning engineers do not have to worry about technical infrastructure. Usually, it automatically selects the container environment, but we're going to take advantage of a feature to specify our own environment with Docker. Not only will this allow us to install our game environment to be deployed to the cloud, but it will also significantly speed up hyperparameter tuning time as each worker can skip the library installation steps.
The <a href="Dockerfile">Dockerfile</a> in this directory shows the steps taken to build this environment. First, we copy from a Google Deep Learning Container which already has Google Cloud Libraries installed. Then, we install our other desired modules and libraries. ffmpeg, xvfb, and python-opengl are needed in order to get video output from the server. Machines on the cloud don't typically have a display (why would they need one?), so we'll make a virtual display of our own.
After we copy our code, we tell the container to be configured as an executable so we can pass our hyperparameter tuning flags to it with the ENTRYPOINT command. In order to set up our virtual display, we can use the xvfb-run command. Unfortunately, Docker strips quotes from specified commands in ENTRYPOINT, so we'll make a super simple shell script, <a href="train_model.sh">train_model.sh</a>, to specify our virtual display parameters. The "@" parameter is used to pass the flags called against the container to our python module, trainer.trainer.
CartPole-v0
So what is the game we'll be solving for? We'll be playing with AI Gym's CartPole Environment. As MNIST is the "Hello World" of image classification, CartPole is the "Hello World" of Deep Q Networks. Let's install OpenAI Gym and play with the game ourselves!
End of explanation
"""
from collections import deque
import random
import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
env = gym.make('CartPole-v0')
print("The observation space is", env.observation_space)
print("The observation dimensions are", env.observation_space.shape)
print("The action space is", env.action_space)
print("The number of possible actions is", env.action_space.n)
"""
Explanation: Note: Restart the kernel if the above libraries needed to be installed
The gym library hosts a number of different gaming environments that our agents (and us humans) can play around in. To make an environment, we simply need to pass it what game we'd like to play with the make method.
This will create an environment object with a number of useful methods and properties.
* The observation_space parameter is the structure of observations about the environment.
- Each "state" or snapshot or our environment will follow this structure
* The action_space parameter is the possible actions the agent can take
So for example, with CartPole, there are 4 observation dimensions which represent [Cart Position, Cart Velocity, Pole Angle, Pole Velocity At Tip]. For the actions, there are 2 possible actions to take: 0 pushes the cart to the left, and 1 pushes the cart to the right. More detail is described in the game's code here.
End of explanation
"""
def print_state(state, step, reward=None):
    """Print a one-line, fixed-precision summary of a CartPole state.

    state is unpacked as (cart position, cart velocity, pole angle,
    pole tip velocity); reward is shown verbatim (None before any step).
    """
    cart_x, cart_v, pole_a, pole_v = state
    print(f'Step {step} - Cart X: {cart_x:.3f}, Cart V: {cart_v:.3f}, '
          f'Pole A: {pole_a:.3f}, Pole V:{pole_v:.3f}, Reward:{reward}')
# Restart the environment and show the starting state.
state = env.reset()
step = 0
print_state(state, step)
# Take a single step: action 0 pushes the cart to the left.
action = 0
state_prime, reward, done, info = env.step(action)
step += 1
print_state(state_prime, step, reward)
print("The game is over." if done else "The game can continue.")
print("Info:", info)  # CartPole returns an empty info dict.
"""
Explanation: The reset method will restart the environment and return a starting state.
The step method takes an action, applies it to the environment and returns a new state. Each step returns a new state, the transition reward, whether the game is over or not, and game specific information. For CartPole, there is no extra info, so it returns a blank dictionary.
End of explanation
"""
action = 1 # Change me: 0 Left, 1 Right
state_prime, reward, done, info = env.step(action)
step += 1
print_state(state_prime, step, reward)
print("The game is over." if done else "The game can continue.")
"""
Explanation: Run the cell below repeatedly until the game is over, changing the action to push the cart left (0) or right (1). The game is considered "won" when the pole can stay up for an average of steps 195 over 100 games. How far can you get? An agent acting randomly can only survive about 10 steps.
End of explanation
"""
# [0, 1, 0, 1, 0, 1, ...]
# Hard-coded policy: alternate pushing the cart left and right.
actions = [x % 2 for x in range(200)]
state = env.reset()
step = 0
episode_reward = 0
done = False
# Play one episode: stop when the game ends or the action list runs out.
while not done and step < len(actions):
    action = actions[step] # In the future, our agents will define this.
    state_prime, reward, done, info = env.step(action)
    episode_reward += reward
    step += 1
    state = state_prime
    print_state(state, step, reward)
end_statement = "Game over!" if done else "Ran out of actions!"
print(end_statement, "Score =", episode_reward)
"""
Explanation: We can make our own policy and create a loop to play through an episode (one full simulation) of the game. Below, actions are generated to alternate between pushing the cart left and right. The code is very similar to how our agents will be interacting with the game environment.
End of explanation
"""
def deep_q_network(
        state_shape, action_size, learning_rate, hidden_neurons):
    """Creates a Deep Q Network to emulate Q-learning.

    Creates a two hidden-layer Deep Q Network. Similar to a typical neural
    network, but the loss is only evaluated on the Q-value of the action
    actually taken: a one-hot "mask" input zeroes out every other output
    node so their (unknown) targets do not contribute to the gradient.

    Args:
        state_shape: a tuple of ints representing the observation space.
        action_size (int): the number of possible actions.
        learning_rate (float): the neural network's learning rate.
        hidden_neurons (int): the number of neurons to use per hidden
            layer.

    Returns:
        A compiled tf.keras Model taking [states, action_mask] inputs and
        producing masked Q-values, trained with mean-squared-error loss.
    """
    state_input = layers.Input(state_shape, name='frames')
    actions_input = layers.Input((action_size,), name='mask')

    hidden_1 = layers.Dense(hidden_neurons, activation='relu')(state_input)
    hidden_2 = layers.Dense(hidden_neurons, activation='relu')(hidden_1)
    q_values = layers.Dense(action_size)(hidden_2)
    # Zero out Q-values for actions that were not taken, so only the
    # simulated action's output is trained against the Bellman target.
    masked_q_values = layers.Multiply()([q_values, actions_input])

    model = models.Model(
        inputs=[state_input, actions_input], outputs=masked_q_values)
    # 'learning_rate' replaces the deprecated 'lr' keyword alias in TF2.
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
    model.compile(loss='mse', optimizer=optimizer)
    return model
"""
Explanation: It's a challenge to get to 200! We could repeatedly experiment to find the best heuristics to beat the game, or we could leave all that work to the robot. Let's create an intelligence to figure this out for us.
The Theory Behind Deep Q Networks
The fundamental principle behind RL is we have two entities: the agent and the environment. The agent takes state and reward information about the envionment and chooses an action. The environment takes that action and will change to be in a new state.
<img src="images/agent_and_environment.jpg" width="476" height="260">
RL assumes that the environment follows a Markov Decision Process (MDP). That means the state is dependent partially on the agent's actions, and partially on chance. MDPs can be represented by a graph, with states and actions as nodes, and rewards and path probabilities on the edges.
<img src="images/mdp.jpg" width="471" height="243">
So what would be the best path through the graph above? Or perhaps a more difficult question, what would be our expected winnings if we played optimally? The probability introduced in this problem has inspired multiple strategies over the years, but all of them boil down to the idea of discounted future rewards.
Would you rather have $100 now or $105 a year from now? With inflation, there's no definitive answer, but each of us has a threshold that we use to determine the value of something now versus the value of something later. In psychology, this is called Delayed Gratification. Richard E. Bellman expressed this theory in an equation widely used in RL called the Bellman Equation. Let's introduce some vocab to better define it.
| Symbol | Name | Definition | Example |
| - | - | - | - |
| | agent | An entity that can act and transition between states | Us when we play CartPole |
| s | state | The environmental parameters describing where the agent is | The position of the cart and angle of the pole |
| a | action | What the agent can do within a state | Pushing the cart left or right |
| t | time / step | One transition between states | One push of the cart |
|| episode | One full simulation run | From the start of the game to game over |
| v, V(s) | value | How much a state is worth | V(last state dropping the pole) = 0
| r, R(s, a) | reward | Value gained or lost transitioning between states through an action | R(keeping the pole up) = 1 |
| γ | gamma | How much to value a current state based on a future state | Coming up soon |
| 𝜋, 𝜋(s) | policy |The recommended action to the agent based on the current state | π(in trouble) = honesty |
Bellman realized this: The value of our current state should the discounted value of the next state the agent will be in plus any rewards picked up along the way, given the agent takes the best action to maximize this.
Using all the symbols from above, we get:
<img src="images/bellman_equation.jpg" width="260" height="50">
However, this is assuming we know all the states, their corresponding actions, and their rewards. If we don't know this in advance, we can explore and simulate this equation with what is called the Q equation:
<img style="background-color:white;" src="https://wikimedia.org/api/rest_v1/media/math/render/svg/47fa1e5cf8cf75996a777c11c7b9445dc96d4637">
Here, the value function is replaced with the Q value, which is a function of a state and action. The learning rate is how much we want to change our old Q value with new information found during simulation. Visually, this results in a Q-table, where rows are the states, actions are the columns, and each cell is the value found through simulation.
|| Meal | Snack | Wait |
|-|-|-|-|
| Hangry | 1 | .5 | -1 |
| Hungry | .5 | 1 | 0 |
| Full | -1 | -.5 | 1.5 |
So this is cool and all, but how exactly does this fit in with CartPole? Here, MDPs are discrete states. CartPole has multidimensional states on a continuous scale. This is where neural networks save the day! Rather than categorize each state, we can feed state properties into our network. By having the same number of output nodes as possible actions, our network can be used to predict the value of the next state given the current state and action.
Building the Agent
These networks can be configured with the same architectures and tools as other problems, such as CNNs. However, the one gotcha is that uses a specialized loss function. We'll instead be using the derivative of the Bellman Equation. Let's go ahead and define our model function as it is in trainer/model.py
End of explanation
"""
class Memory():
    """Sets up a memory replay buffer for a Deep Q Network.

    A simple memory buffer for a DQN. This one randomly selects state
    transitions with uniform probability, but research has gone into
    other methods. For instance, a weight could be given to each memory
    depending on how big of a difference there is between predicted Q values
    and target Q values.

    Args:
        memory_size (int): How many elements to hold in the memory buffer.
        batch_size (int): The number of elements to include in a replay batch.
        gamma (float): The "discount rate" used to assess Q values.
    """
    def __init__(self, memory_size, batch_size, gamma):
        # deque with maxlen silently drops the oldest experience once full.
        self.buffer = deque(maxlen=memory_size)
        self.batch_size = batch_size
        self.gamma = gamma

    def add(self, experience):
        """Adds an experience into the memory buffer.

        Args:
            experience: a (state, action, reward, state_prime, done) tuple.
        """
        self.buffer.append(experience)

    def sample(self):
        """Uniformly selects from the replay memory buffer.

        Uniformly and randomly selects experiences to train the neural
        network on, and regroups them into per-component batches to allow
        batch math on the experience components.

        Returns:
            (tuple): A (states, actions, rewards, state_primes, dones)
            tuple, where each element batches that component across the
            sampled experiences.
        """
        buffer_size = len(self.buffer)
        index = np.random.choice(
            np.arange(buffer_size), size=self.batch_size, replace=False)

        # Unzip the sampled experience tuples into per-component columns.
        # Building each column separately (instead of np.array(rows).T on
        # mixed-type rows) avoids creating a ragged object ndarray, which
        # recent NumPy releases reject.
        batch = [self.buffer[i] for i in index]
        states, actions, rewards, states_prime, dones = zip(*batch)

        states_mb = tf.convert_to_tensor(np.array(states, dtype=np.float32))
        actions_mb = np.array(actions, dtype=np.int8)
        rewards_mb = np.array(rewards, dtype=np.float32)
        states_prime_mb = np.array(states_prime, dtype=np.float32)
        dones_mb = list(dones)
        return states_mb, actions_mb, rewards_mb, states_prime_mb, dones_mb
"""
Explanation: Notice any other atypical aspects of this network?
Here, we take in both state and actions as inputs to our network. The states are fed in as normal, but the actions are used to "mask" the output. This is actually used for faster training, as we'd only want to update the nodes correspnding to the action that we simulated.
The Bellman Equation actually isn't in the network. That's because this is only the "brain" of our agent. As an intelligence, it has much more! Before we get to how exactly the agent learns, let's looks at the other aspects of its body: "Memory" and "Exploration".
Just like other neural network algorithms, we need data to train on. However, this data is the result of our simulations, not something previously stored in a table. Thus, we're going to give our agent a memory where we can store state - action - new state transitions to learn on.
Each time the agent takes a step in gym, we'll save (state, action, reward, state_prime, done) to our buffer, which is defined like so.
End of explanation
"""
# Build a small replay buffer and fill it by replaying the alternating
# left/right policy, recording each transition as we go.
test_memory_size = 20
test_batch_size = 4
test_gamma = .9 # Unused here. For learning.
test_memory = Memory(test_memory_size, test_batch_size, test_gamma)
# [0, 1, 0, 1, ...]: alternate pushing the cart left and right.
actions = [x % 2 for x in range(200)]
state = env.reset()
step = 0
episode_reward = 0
done = False
while not done and step < len(actions):
    action = actions[step] # In the future, our agents will define this.
    state_prime, reward, done, info = env.step(action)
    episode_reward += reward
    test_memory.add((state, action, reward, state_prime, done)) # New line here
    step += 1
    state = state_prime
    print_state(state, step, reward)
end_statement = "Game over!" if done else "Ran out of actions!"
print(end_statement, "Score =", episode_reward)
"""
Explanation: Let's make a fake buffer and play around with it! We'll add the memory into our game play code to start collecting experiences.
End of explanation
"""
test_memory.sample()
"""
Explanation: Now, let's sample the memory by running the cell below multiple times. It's different each call, and that's on purpose. Just like with other neural networks, it's important to randomly sample so that our agent can learn from many different situations.
The use of a memory buffer is called Experience Replay. The above technique of a uniform random sample is a quick and computationally efficient way to get the job done, but RL researchers often look into other sampling methods. For instance, maybe there's a way to weight memories based on their rarity or loss when the agent learns with it.
End of explanation
"""
class Partial_Agent():
    """A reinforcement learning agent (without learning yet) for a gym game.

    Bundles the Q-network, the experience memory, and an epsilon-greedy
    policy whose random-action chance decays over time.
    """
    def __init__(self, network, memory, epsilon_decay, action_size):
        """Store the agent's network, memory, and exploration settings.

        Args:
            network: A neural network created from deep_q_network().
            memory: A Memory class object.
            epsilon_decay (float): The rate at which to decay random actions.
            action_size (int): The number of possible actions to take.
        """
        self.network = network
        self.memory = memory
        self.action_size = action_size
        self.epsilon_decay = epsilon_decay
        self.epsilon = 1  # Start fully exploratory: always act at random.

    def act(self, state, training=False):
        """Select an action index for *state* (epsilon-greedy when training).

        Args:
            state (list of numbers): The state of the environment to act on.
            training (bool): True if the agent is training.

        Returns:
            (int) The index of the action to take.
        """
        if training:
            # Hold epsilon at 1 (pure exploration) until the memory can
            # supply a full training batch; only then start decaying it.
            enough_memories = len(self.memory.buffer) >= self.memory.batch_size
            if enough_memories:
                self.epsilon *= self.epsilon_decay
            if self.epsilon > np.random.rand():
                print("Exploration!")
                return random.randint(0, self.action_size-1)
        # Greedy path: score every action with the network and take the best.
        print("Exploitation!")
        batched_state = np.expand_dims(state, axis=0)
        full_mask = np.ones((1, self.action_size,))
        q_values = self.network.predict([batched_state, full_mask])
        return np.argmax(q_values[0])
"""
Explanation: But before the agent has any memories and has learned anything, how is it supposed to act? That comes down to Exploration vs Exploitation. The trouble is that in order to learn, risks with the unknown need to be made. There's no right answer, but there is a popular answer. We'll start by acting randomly, and over time, we will slowly decay our chance to act randomly.
Below is a partial version of the agent.
End of explanation
"""
state = env.reset()
# Define "brain"
space_shape = env.observation_space.shape
action_size = env.action_space.n
# Feel free to play with these
test_learning_rate = .2
test_hidden_neurons = 10
test_epsilon_decay = .95
test_network = deep_q_network(
space_shape, action_size, test_learning_rate, test_hidden_neurons)
test_agent = Partial_Agent(
test_network, test_memory, test_epsilon_decay, action_size)
"""
Explanation: Let's define the agent and get a starting state to see how it would act without any training.
End of explanation
"""
action = test_agent.act(state, training=True)
print("Push Right" if action else "Push Left")
"""
Explanation: Run the cell below multiple times. Since we're decaying the random action rate after every action, it's only a matter of time before the agent exploits more than it explores.
End of explanation
"""
def learn(self):
    """Trains the Deep Q Network on one random mini-batch of experiences.

    Returns:
        None if the memory does not yet hold a full batch; otherwise the
        result of network.train_on_batch for this step.
    """
    batch_size = self.memory.batch_size
    if len(self.memory.buffer) < batch_size:
        return None

    # Obtain random mini-batch from memory.
    state_mb, action_mb, reward_mb, next_state_mb, done_mb = (
        self.memory.sample())

    # Get Q values for next_state.
    # All-ones mask so the network scores every possible action, then keep
    # the best Q value per transition.
    predict_mask = np.ones(action_mb.shape + (self.action_size,))
    next_q_mb = self.network.predict([next_state_mb, predict_mask])
    next_q_mb = tf.math.reduce_max(next_q_mb, axis=1)

    # Apply the Bellman Equation
    target_qs = (next_q_mb * self.memory.gamma) + reward_mb
    # Terminal transitions have no future value: the target is the reward.
    target_qs = tf.where(done_mb, reward_mb, target_qs)

    # Match training batch to network output:
    # target_q where action taken, 0 otherwise.
    action_mb = tf.convert_to_tensor(action_mb, dtype=tf.int32)
    action_hot = tf.one_hot(action_mb, self.action_size)
    target_mask = tf.multiply(tf.expand_dims(target_qs, -1), action_hot)

    # action_hot also masks the network's outputs, so only the taken
    # action contributes to the loss.
    return self.network.train_on_batch(
        [state_mb, action_hot], target_mask, reset_metrics=False
    )

# Attach learn() to the class and rebuild the agent so it can now train.
Partial_Agent.learn = learn
test_agent = Partial_Agent(
    test_network, test_memory, test_epsilon_decay, action_size)
"""
Explanation: Memories, a brain, and a healthy dose of curiosity. We finally have all the ingredients for our agent to learn. After all, as the Scarecrow from the Wizard of Oz said:
"Everything in life is unusual until you get accustomed to it."
~L. Frank Baum
Below is the code used by our agent to learn, where the Bellman Equation at last makes an appearance. We'll run through the following steps.
Pull a batch from memory
Get the Q value (the output of the neural network) based on the memory's ending state
Assume the Q value of the action with the highest Q value (test all actions)
Update these Q values with the Bellman Equation
target_qs = (next_q_mb * self.memory.gamma) + reward_mb
If the state is the end of the game, set the target_q to the reward for entering the final state.
Reshape the target_qs to match the networks output
Only learn on the memory's corresponding action by setting all action nodes to zero besides the action node taken.
Fit Target Qs as the label to our model against the memory's starting state and action as the inputs.
End of explanation
"""
# Play one full episode, storing every transition and learning each step.
state = env.reset()
step = 0
episode_reward = 0
done = False
while not done:
    action = test_agent.act(state, training=True)
    state_prime, reward, done, info = env.step(action)
    episode_reward += reward
    test_agent.memory.add((state, action, reward, state_prime, done)) # New line here
    step += 1
    state = state_prime
    print_state(state, step, reward)
    # Prints None until the memory holds a full batch; afterwards the loss.
    print(test_agent.learn())
print("Game over! Score =", episode_reward)
"""
Explanation: Nice! We finally have an intelligence that can walk and talk and... well ok, this intelligence is too simple to be able to do those things, but maybe it can learn to push a cart with a pole on it. Let's update our training loop to use our new agent.
Run the below cell over and over up to ten times to train the agent.
End of explanation
"""
def _parse_arguments(argv):
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--game',
help='Which open ai gym game to play',
type=str,
default='CartPole-v0')
parser.add_argument(
'--episodes',
help='The number of episodes to simulate',
type=int,
default=200)
parser.add_argument(
'--learning_rate',
help='Learning rate for the nueral network',
type=float,
default=0.2)
parser.add_argument(
'--hidden_neurons',
help='The number of nuerons to use per layer',
type=int,
default=30)
parser.add_argument(
'--gamma',
help='The gamma or "discount" factor to discount future states',
type=float,
default=0.5)
parser.add_argument(
'--explore_decay',
help='The rate at which to decay the probability of a random action',
type=float,
default=0.1)
parser.add_argument(
'--memory_size',
help='Size of the memory buffer',
type=int,
default=100000)
parser.add_argument(
'--memory_batch_size',
help='The amount of memories to sample from the buffer while training',
type=int,
default=8)
parser.add_argument(
'--job-dir',
help='Directory where to save the given model',
type=str,
default='models/')
parser.add_argument(
'--print_rate',
help='How often to print the score, 0 if never',
type=int,
default=0)
parser.add_argument(
'--eval_rate',
help="""While training, perform an on-policy simulation and record
metrics to tensorboard every <record_rate> steps, 0 if never. Use
higher values to avoid hyperparameter tuning "too many metrics"
error""",
type=int,
default=20)
return parser.parse_known_args(argv)
"""
Explanation: Hypertuning
Chances are, at this point, the agent is having a tough time learning. Why is that? Well, remember that hyperparameter tuning job we kicked off at the start of this notebook?
There are many parameters that need adjusting with our agent. Let's recap:
* The number of episodes or full runs of the game to train on
* The neural networks learning_rate
* The number of hidden_neurons to use in our network
* gamma, or how much we want to discount the future value of states
* How quickly we want to switch from explore to exploit with explore_decay
* The size of the memory buffer, memory_size
* The number of memories to pull from the buffer when training, memory_batch_size
These all have been added as flags to pass to the model in trainer/trainer.py's _parse_arguments method. For the most part, trainer/trainer.py follows the structure of the training loop that we have above, but it does have a few extra bells and whistles, like a hook into TensorBoard and video output.
End of explanation
"""
|
sonyahanson/assaytools | examples/ipynbs/data-analysis/hsa/analyzing_FLU_hsa_lig2_20150922.ipynb | lgpl-2.1 | import numpy as np
import matplotlib.pyplot as plt
from lxml import etree
import pandas as pd
import os
import matplotlib.cm as cm
import seaborn as sns
%pylab inline
def get_wells_from_section(path):
    """Get the read value and position of each well in one XML data section.

    Args:
        path: An lxml element for a plate-reader <Section>; its */Well
            descendants each carry a 'Pos' attribute (e.g. 'A1') and child
            read elements whose text is the measured value.

    Returns:
        A list of 12 plate columns (1-12), each a list of 8 values for
        plate rows A-H; wells with no reading are None.
    """
    reads = path.xpath("*/Well")
    # If a well has several child reads, the last one wins (dict order),
    # matching the original tuple-based construction.
    datalist = {
        r.attrib['Pos']: float(s.text)
        for r in reads
        for s in r
    }
    welllist = [
        # chr(64 + row) maps rows 1-8 to letters A-H.
        [datalist.get(chr(64 + row) + str(col)) for row in range(1, 9)]
        for col in range(1, 13)
    ]
    return welllist
file_lig="MI_FLU_hsa_lig2_20150922_164254.xml"
file_name = os.path.splitext(file_lig1)[0]
label = file_name[0:25]
print label
root = etree.parse(file_lig)
#find data sections
Sections = root.xpath("/*/Section")
much = len(Sections)
print "****The xml file " + file_lig + " has %s data sections:****" % much
for sect in Sections:
print sect.attrib['Name']
#Work with topread
TopRead = root.xpath("/*/Section")[0]
welllist = get_wells_from_section(TopRead)
df_topread = pd.DataFrame(welllist, columns = ['A - HSA','B - Buffer','C - HSA','D - Buffer', 'E - HSA','F - Buffer','G - HSA','H - Buffer'])
df_topread.transpose()
# To generate cvs file
# df_topread.transpose().to_csv(label + Sections[0].attrib['Name']+ ".csv")
"""
Explanation: FLUORESCENCE BINDING ASSAY ANALYSIS
Experiment date: 2015/09/22
Protein: HSA
Fluorescent ligand : dansyl glycine(lig2)
Xml parsing parts adopted from Sonya's assaytools/examples/fluorescence-binding-assay/Src-gefitinib fluorescence simple.ipynb
End of explanation
"""
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
%matplotlib inline
def model(x,slope,intercept):
    """Straight line y = slope*x + intercept, shaped for scipy.optimize.curve_fit."""
    y = slope * x
    return y + intercept
# generate some data
#X = np.random.rand(1000)
#true_slope=1.0
#true_intercept=0.0
#noise = np.random.randn(len(X))*0.1
#Y = model(X,slope=true_slope,intercept=true_intercept) + noise
#ligand titration
lig2=np.array([200.0000,86.6000,37.5000,16.2000,7.0200, 3.0400, 1.3200, 0.5700, 0.2470, 0.1070, 0.0462, 0.0200])
lig2
# Since I have 4 replicates
L=np.concatenate((lig2, lig2, lig2, lig2))
len(L)
# Fluorescence read
df_topread.loc[:,("B - Buffer", "D - Buffer", "F - Buffer", "H - Buffer")]
B=df_topread.loc[:,("B - Buffer")]
D=df_topread.loc[:,("D - Buffer")]
F=df_topread.loc[:,("F - Buffer")]
H=df_topread.loc[:,("H - Buffer")]
Y = np.concatenate((B.as_matrix(),D.as_matrix(),F.as_matrix(),H.as_matrix()))
(MF,BKG),_ = optimize.curve_fit(model,L,Y)
print('MF: {0:.3f}, BKG: {1:.3f}'.format(MF,BKG))
print('y = {0:.3f} * L + {1:.3f}'.format(MF, BKG))
"""
Explanation: Calculating Molar Fluorescence (MF) of Free Ligand
1. Maximum likelihood curve-fitting
Find the maximum likelihood estimate, $\theta^$, i.e. the curve that minimizes the squared error $\theta^ = \text{argmin} \sum_i |y_i - f_\theta(x_i)|^2$ (assuming i.i.d. Gaussian noise)
Y = MF*L + BKG
Y: Fluorescence read (Flu unit)
L: Total ligand concentration (uM)
BKG: background fluorescence without ligand (Flu unit)
MF: molar fluorescence of free ligand (Flu unit/ uM)
End of explanation
"""
def model2(x,kd,fr):
    """One-site binding model: fluorescence as a function of total ligand.

    Args:
        x: total ligand concentration (uM); scalar or numpy array.
        kd: dissociation constant (uM) - fitted.
        fr: molar fluorescence ratio of complex to free ligand - fitted.

    Returns:
        Predicted fluorescence Y = BKG + MF*(free ligand) + FR*MF*(complex).
    """
    bkg = 86.2   # background fluorescence without ligand (from buffer fit)
    mf = 2.517   # molar fluorescence of free ligand (from buffer fit)
    rtot = 0.5   # total HSA concentration (uM)
    # Complex concentration [LR] from the quadratic binding equation.
    # FIX: the original exponent (1/2) is integer division under Python 2,
    # i.e. **0 == 1 instead of a square root; 0.5 is correct everywhere.
    lr = ((x + rtot + kd) - ((x + rtot + kd)**2 - 4*x*rtot)**0.5) / 2
    return bkg + mf*(x - lr) + fr*mf*lr
# Total HSA concentration (uM)
Rtot = 0.5
#Total ligand titration
X = L
len(X)
# Fluorescence read
df_topread.loc[:,("A - HSA", "C - HSA", "E - HSA", "G - HSA")]
A=df_topread.loc[:,("A - HSA")]
C=df_topread.loc[:,("C - HSA")]
E=df_topread.loc[:,("E - HSA")]
G=df_topread.loc[:,("G - HSA")]
Y = np.concatenate((A.as_matrix(),C.as_matrix(),E.as_matrix(),G.as_matrix()))
len(Y)
(Kd,FR),_ = optimize.curve_fit(model2, X, Y, p0=(5,1))
print('Kd: {0:.3f}, Fr: {1:.3f}'.format(Kd,FR))
"""
Explanation: Curve-fitting to binding saturation curve
Fluorescence intensity vs added ligand
LR = ((X+Rtot+KD) - SQRT((X+Rtot+KD)^2 - 4\*X\*Rtot))/2
L = X - LR
Y = BKG + MF\*L + FR\*MF\*LR
Constants
Rtot: receptor concentration (uM)
BKG: background fluorescence without ligand (Flu unit)
MF: molar fluorescence of free ligand (Flu unit/ uM)
Parameters to fit
Kd: dissociation constant (uM)
FR: Molar fluorescence ratio of complex to free ligand (unitless)
complex fluorescence = FR\*MF\*LR
Experimental data
Y: fluorescence measurement
X: total ligand concentration
L: free ligand concentration
End of explanation
"""
|
retnuh/deep-learning | sentiment-rnn/Sentiment_RNN.ipynb | mit | import numpy as np
import tensorflow as tf
with open('../sentiment_network/reviews.txt', 'r') as f:
reviews = f.read()
with open('../sentiment_network/labels.txt', 'r') as f:
labels = f.read()
reviews[:2000]
"""
Explanation: Sentiment Analysis with an RNN
In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.
The architecture for this network is shown below.
<img src="assets/network_diagram.png" width=400px>
Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on its own.
From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.
We don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.
End of explanation
"""
from string import punctuation
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
"""
Explanation: Data preprocessing
The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.
You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \n. To deal with those, I'm going to split the text into each review using \n as the delimiter. Then I can combine all the reviews back together into one big string.
First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.
End of explanation
"""
# Create your dictionary that maps vocab words to integers here
vocab_to_int = { w: i+1 for i, w in enumerate(sorted(set(words)))}
# Convert the reviews to integers, same shape as reviews list, but with integers
reviews_ints = [ [vocab_to_int[w] for w in review.split()] for review in reviews]
reviews[1]
"""
Explanation: Encoding the words
The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.
Exercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.
Also, convert the reviews to integers and store the reviews in a new list called reviews_ints.
End of explanation
"""
labels.split('\n')[-10:-1]
# Convert labels to 1s and 0s for 'positive' and 'negative'
label_map = { 'positive': 1, 'negative': 0}
labels = np.array([ label_map[l] for l in labels.split('\n')[:-1]])
"""
Explanation: Encoding the labels
Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1.
Exercise: Convert labels from positive and negative to 1 and 0, respectively.
End of explanation
"""
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
"""
Explanation: If you built labels correctly, you should see the next output.
End of explanation
"""
# Filter out that review with 0 length
reviews_ints = [ r for r in reviews_ints if len(r) > 0]
"""
Explanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.
Exercise: First, remove the review with zero length from the reviews_ints list.
End of explanation
"""
seq_len = 200

def feature_arr(review):
    """Return *review* as a list of exactly seq_len word ids.

    Longer reviews are truncated to their first seq_len tokens; shorter
    ones are left-padded with 0s (0 is reserved: vocab ids start at 1).
    The input list is never mutated.
    """
    if len(review) >= seq_len:
        return review[:seq_len]
    # Single concatenation instead of repeated insert(0, 0), which shifted
    # the whole list each call (O(n) per insert, O(n**2) overall).
    return [0] * (seq_len - len(review)) + list(review)
features = np.array([ feature_arr(review) for review in reviews_ints ])
"""
Explanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from reviews_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use only the first 200 words as the feature vector.
This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.
End of explanation
"""
features[:10,:100]
"""
Explanation: If you build features correctly, it should look like that cell output below.
End of explanation
"""
len(features)
split_frac = 0.8
indices = np.random.permutation(np.arange(len(features)))
split_ind = int(split_frac * len(indices))
train_ind, val_ind = np.split(indices, [split_ind])
print(train_ind[:5])
train_x, val_x = features[train_ind], features[val_ind]
train_y, val_y = labels[train_ind], labels[val_ind]
val_x, test_x = np.split(val_x, 2)
val_y, test_y = np.split(val_y, 2)
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}\t{}".format(val_x.shape, val_y.shape),
"\nTest set: \t\t{}\t{}".format(test_x.shape, test_y.shape))
"""
Explanation: Training, Validation, Test
With our data in nice shape, we'll split it into training, validation, and test sets.
Exercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.
End of explanation
"""
lstm_size = 256
lstm_layers = 1
batch_size = 500
learning_rate = 0.001
"""
Explanation: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like:
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2500, 200)
Build the graph
Here, we'll build the graph. First up, defining the hyperparameters.
lstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.
lstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.
batch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.
learning_rate: Learning rate
End of explanation
"""
n_words = len(vocab_to_int)+1
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')
labels_ = tf.placeholder(tf.int32, [None, 1], name='labels')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
"""
Explanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
Exercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.
End of explanation
"""
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with graph.as_default():
embedding = tf.Variable(tf.random_uniform([n_words+1, embed_size], minval=-1, maxval=1))
embed = tf.nn.embedding_lookup(embedding, inputs_)
"""
Explanation: Embedding
Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.
Exercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, 200].
End of explanation
"""
with graph.as_default():
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
cost = tf.losses.mean_squared_error(labels_, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
"""
Explanation: LSTM cell
<img src="assets/network_diagram.png" width=400px>
Next, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.
To create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:
tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=<function tanh at 0x109f1ef28>)
you can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
to create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like
drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
Most of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
Here, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.
So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an achitectural viewpoint, just a more complicated graph in the cell.
Exercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.
Here is a tutorial on building RNNs that will help you out.
End of explanation
"""
with graph.as_default():
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
"""
Explanation: Validation accuracy
Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass.
End of explanation
"""
def get_batches(x, y, batch_size=100):
    """Yield successive (x, y) batches of exactly batch_size items each.

    Trailing items that do not fill a complete final batch are dropped.
    """
    usable = (len(x) // batch_size) * batch_size
    for start in range(0, usable, batch_size):
        stop = start + batch_size
        yield x[start:stop], y[start:stop]
"""
Explanation: Batching
This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].
End of explanation
"""
epochs = 10

with graph.as_default():
    saver = tf.train.Saver()

# NOTE(review): cell, initial_state and final_state come from the LSTM-cell
# exercise cell, which is not completed above - confirm it is filled in
# before running.
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    iteration = 1
    for e in range(epochs):
        # Fresh zeroed LSTM state at the start of each epoch.
        state = sess.run(initial_state)

        for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
            feed = {inputs_: x,
                    labels_: y[:, None],
                    keep_prob: 0.5,
                    initial_state: state}
            # Carry the batch's final LSTM state into the next batch.
            loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)

            if iteration%5==0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.3f}".format(loss))

            if iteration%25==0:
                # Periodic validation pass, dropout disabled (keep_prob 1).
                val_acc = []
                val_state = sess.run(cell.zero_state(batch_size, tf.float32))
                for x, y in get_batches(val_x, val_y, batch_size):
                    feed = {inputs_: x,
                            labels_: y[:, None],
                            keep_prob: 1,
                            initial_state: val_state}
                    batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
                    val_acc.append(batch_acc)
                print("Val acc: {:.3f}".format(np.mean(val_acc)))
            iteration +=1
    saver.save(sess, "checkpoints/sentiment.ckpt")
"""
Explanation: Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.
End of explanation
"""
# Evaluate the trained network on the held-out test split.
test_acc = []
with tf.Session(graph=graph) as sess:
    # Restore the most recently saved training checkpoint.
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    test_state = sess.run(cell.zero_state(batch_size, tf.float32))
    for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
        feed = {inputs_: x,
                labels_: y[:, None],
                keep_prob: 1,  # no dropout at evaluation time
                initial_state: test_state}
        batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
        test_acc.append(batch_acc)
    print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
"""
Explanation: Testing
End of explanation
"""
|
Paradigm4/wearable_prototypes | sleep_python3.ipynb | agpl-3.0 | import scidbpy
import getpass
import requests
import warnings
warnings.filterwarnings("ignore")
#requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
db = scidbpy.connect(scidb_url="http://localhost:8080")
"""
Explanation: SciDB and Machine Learning on Wearable Data
This work is motivated by the following publication out of the IHI 2012 - 2ND ACM SIGHIT International Health Informatics Symposium: https://dl.acm.org/citation.cfm?doid=2110363.2110375
The authors explored ways to detect sleep using a wrist-worn accelerometer and light sensor. In this Notebook, we explore their data loaded in SciDB via a few queries and analytics. We'll start off by running simple fetch and summary queries to familiarize ourselves with the data. Then we'll explore ways to use SciDB streaming to bring analytics to the data and execute data-wide computations in the cluster. Finally, we'll build towards a machine learning algorithm that can detect sleep with some accuracy.
The objective of this work is not so much to train a good model, but to demonstrate the SciDB-powered workflow itself. The key take-away is that SciDB helps easily orchestrate parallelism for complex calculations that can be expressed in popular languages like R and Python.
1. Connect and Explore
The Usual AMI password is 'Paradigm4' - enter it when prompted.
End of explanation
"""
ihi_schema = db.show(db.arrays.IHI_DATA)[:]['schema'][0]
ihi_schema
db.summarize(db.arrays.IHI_DATA)[:]
db.limit(db.arrays.IHI_DATA, 5)[:]
"""
Explanation: We loaded all of the data from the paper into a single array, indexed by
* subject: a simple numeric identifier,
* day: a simple counter starting at day 0 for each subject and
* mil: the number of milliseconds elapsed since start of day (0 to 86,400,000).
Each cell in the array has the following attributes:
* acc_x,y,z: the 3D accelerometer readings
* light: the light sensor output
* sleep: the "ground truth" as to whether or not the subject is sleeping at that time (1 means awake, 2 means asleep)
Let's take a look at a few entries:
End of explanation
"""
#A helper time conversion routine
from datetime import time
def time_to_millis(t):
    """Convert a datetime.time to whole milliseconds since midnight."""
    whole_seconds = (t.hour * 60 + t.minute) * 60 + t.second
    return whole_seconds * 1000 + t.microsecond // 1000
def get_series(subject, day, t_start, t_end):
    """Fetch one subject's raw sensor samples for a time window in one day.

    Args:
        subject: numeric subject id (SciDB dimension).
        day: day counter, starting at 0 for each subject.
        t_start, t_end: inclusive window bounds, either datetime.time
            objects or milliseconds-since-midnight integers.

    Returns:
        A pandas DataFrame of the matching IHI_DATA cells.
    """
    # Accept datetime.time for convenience; the SciDB filter works in
    # milliseconds since midnight.
    if type(t_start) is time:
        t_start = time_to_millis(t_start)
    if type(t_end) is time:
        t_end = time_to_millis(t_end)
    query = db.filter(db.arrays.IHI_DATA, "subject = {} and day = {} and mil>={} and mil <={}".format(
        subject, day, t_start, t_end))
    # [:] downloads the query result from SciDB into a DataFrame.
    return query[:]
d = get_series(subject=0, day=1, t_start = time(8,30,0), t_end=time(9,30,0))
d.head()
"""
Explanation: 1.1 Quick Fetch and Browse Routines
Because SciDB indexes and clusters data on dimensions, it is efficient to retrieve time slices. Below we'll define a function get_series that will let us quickly retrieve data for specific subject, day and time intervals:
End of explanation
"""
import matplotlib.pyplot as plt
plt.show()
def plot_series(d):
d = d.sort_values(by='mil')
plt.rcParams['figure.figsize'] = (18, 1.5)
d1 =d[['mil','acc_x','acc_y','acc_z']]
d1.plot(x='mil', ylim=(-5,260), title='Accelerometer')
d2 =d[['mil','light']]
d2.plot(x='mil', ylim=(-5,260), title = 'Light Sensor')
d3 =d[['mil','sleep']]
d3.plot(x='mil', ylim=(0.95,2.05), title = 'Sleep (Reported)')
plt.show()
def get_and_plot(subject, day, t_start, t_end):
    """Fetch one subject/day time slice and render the standard three-panel plot."""
    slice_df = get_series(subject=subject, day=day, t_start=t_start, t_end=t_end)
    plot_series(slice_df)
get_and_plot(subject=0, day=1, t_start = time(8,50,0), t_end=time(9,30,0))
"""
Explanation: The timeseries are downloaded as Pandas dataframes and we can easily visualize them. We'll create quick plotter function below and visualize subject 0, day 1, from 8:50 AM to 9:30 AM. Looks like our subject is waking up at right around that time:
End of explanation
"""
get_and_plot(subject=3, day=3, t_start = time(0,0,0), t_end=time(1,0,0))
"""
Explanation: For another example - looks like Subject 3 stayed up late on the morning of Day 3. They go to sleep sometime between midnight and 1 AM:
End of explanation
"""
daily_summary = db.aggregate(
db.apply(
db.arrays.IHI_DATA,
"mil", "mil"
),
"count(*) as num_samples",
"min(mil) as t_start",
"max(mil) as t_end",
"subject", "day"
)[:]
daily_summary.sort_values(by=['subject','day']).head()
"""
Explanation: We can also easily aggregate data into a daily summary. From this we can see most have good coverage but sometimes data is missing. Day 0 usually does not start at midnight.
End of explanation
"""
def binned_activity(d):
    """Sum Euclidean accelerometer step distances into 15-minute bins.

    For each consecutive pair of samples the 3D distance between the
    accelerometer readings is computed; those distances are summed per
    15-minute window. Returns a DataFrame with columns
    subject, day, mil (bin midpoint) and activity (the per-bin score).
    """
    # Imported inside the body so the function stays self-contained when
    # shipped to SciDB workers via streaming.
    import pandas as pd
    window_ms = 60000 * 15
    current = d[['mil', 'acc_x', 'acc_y', 'acc_z']]
    previous = current.shift(1)
    previous.columns = ['mil_0', 'acc_x_0', 'acc_y_0', 'acc_z_0']
    paired = pd.concat([current, previous], axis=1)
    paired['activity'] = ((paired['acc_x'] - paired['acc_x_0']) ** 2 +
                          (paired['acc_y'] - paired['acc_y_0']) ** 2 +
                          (paired['acc_z'] - paired['acc_z_0']) ** 2) ** 0.5
    paired['bin'] = (paired['mil'] / window_ms).astype(int)
    summed = paired.groupby(['bin'], as_index=False)[['activity']].sum()
    summed['mil'] = summed['bin'] * window_ms + (window_ms / 2)
    summed['subject'] = d['subject'][0]
    summed['day'] = d['day'][0]
    return summed[['subject', 'day', 'mil', 'activity']]
"""
Explanation: 2. Analytics: Computing an Activity Score
Let's try to calculate the total "amount of movement" that the subject is performing. There are many different approaches in the literature: counting the number of times the accelerometer crosses a threshold (ZCM), proportional integration (PIM), time above threshold and so on. It is also recommended to pre-filter the signal to exclude vibrations that are not of human origin. In this particular case we don't have a lot of information about the device (a custom made prototype) nor even what units the acceleration is captured in.
We'll create a simple example function that will add up Euclidean accelerometer distances from the current reading to the previous reading, over a fixed time window (15 minutes). Thus for each 15-minute window, the user gets an "activity" score sum. The score is 0 when the accelerometer series is flat. The more change there is, the higher the score.
Down the road, we'll show how to use streaming to execute the arbitrary supplied function on all data in parallel. We'll then leave the development of a more realistic function to the user:
End of explanation
"""
d = get_series(subject=0, day=1, t_start = time(8,30,0), t_end=time(9,30,0))
dm = binned_activity(d)
print(dm)
plot_series(d)
dm[['mil','activity']].plot(x='mil', color='green', title = "Activity Score",
xlim=(min(d['mil']), max(d['mil']) ))
plt.show(block=True)
"""
Explanation: We can now run our function on an example timeseries and plot it alongside. Our activity score increases as the subject wakes up:
End of explanation
"""
#Remove the array if exists
try:
db.remove(db.arrays.IHI_BINNED_ACTIVITY)
except:
print("Array not found")
"""
Explanation: 2.1 Applying a Function through Streaming
Taking our binned_activity function from above we will now do the following in SciDB:
1. Upload the code for binned_activity to the SciDB cluster
2. In parallel, run binned_activity on every subject, outputting the activity for every 15-minute period
3. Gather and store results as a new array IHI_BINNED_ACTIVITY
SciDB makes this quite straightforward, modulo a few small aspects. SciDB streaming will execute the function on one chunk of data at a time, and the IHI_DATA array is chunked into 1-hour intervals. The 15 minute windows evenly divide the hour, thus we won't see any overlap issues. If the window were, say, 23 minutes, we would need to write some extra code to redimension the data prior to streaming.
Note also the import pandas as pd line is inside the body of the function. This is not common but will do the right thing: Python is smart enough to import modules only once.
End of explanation
"""
import scidbstrm
scidbstrm.python_map
"""
Explanation: 2.1.1 Notes on Streaming and Python Environments
Very often folks use custom environments and additional package managers like Conda. If that's the case, keep in mind that the python process that is invoked by SciDB's stream() is the default Python process for the Linux user that's running the database. Note also that the stream process does not run in an interactive shell. So, typically, executing a python command inside stream will run /usr/bin/python even if Conda is configured otherwise for user scidb.
However, we can easily add some paths for a different environment. For example, the scidbstrm package comes with a nice python_map shorthand:
End of explanation
"""
#ETA on this is about 1 minute
import scidbstrm
db_fun = db.input(upload_data=scidbstrm.pack_func(binned_activity),
upload_schema=scidbpy.Schema.fromstring('<x:binary not null>[i]')).store()
db.stream(
db.apply(
db.arrays.IHI_DATA,
"mil, mil",
"subject, subject",
"day, day"
),
scidbstrm.python_map,
"'format=feather'",
"'types=int64,int64,double,double'",
"'names=subject,day,mil,activity'",
'_sg({}, 0)'.format(db_fun.name)
).store(db.arrays.IHI_BINNED_ACTIVITY)
"""
Explanation: To run from a different Python environment, all we need to do is prepend some environment exports:
For more notes about Conda, environments and non-interactive shells, see a helpful post here: https://gist.github.com/datagrok/2199506
For more notes about Streaming and security, see
https://github.com/paradigm4/stream#stability-and-security
We now use our script to run the binned_activity function on all data:
End of explanation
"""
db.show(db.arrays.IHI_BINNED_ACTIVITY)[:]['schema'][0]
db.limit(db.arrays.IHI_BINNED_ACTIVITY, 5).fetch(atts_only=True).sort_values(by=['subject','day'])
"""
Explanation: Notice that after the streaming the array now has "placeholder" dimensions and we've converted our subject and day fields to attributes:
End of explanation
"""
s2_day3_activity = db.filter(
db.arrays.IHI_BINNED_ACTIVITY,
"subject = 2 and day = 3"
)[:]
s2_day3_activity = s2_day3_activity.sort_values(by='mil')
s2_day3_activity['hour'] = s2_day3_activity['mil'] / 3600000
s2_day3_activity[['hour','activity']].plot(x='hour')
plt.show(block=True)
"""
Explanation: We can fetch a whole day's worth of activity for a particular subject. It's also interesting to look at inter-day and inter-subject comparisons of peak activity times:
End of explanation
"""
activity_stats = db.grouped_aggregate(
db.grouped_aggregate(
db.arrays.IHI_BINNED_ACTIVITY,
"sum(activity) as daily_activity", "subject, day"
),
"avg(daily_activity) as avg_daily_activity",
"stdev(daily_activity) as stdev_daily_activity",
"count(*) as num_days",
"subject"
).fetch(atts_only=True)
activity_stats
activity_stats.sort_values(by='subject').plot(y='avg_daily_activity', x='subject', kind ='bar')
plt.show()
"""
Explanation: Finally, we can compare the average activity level across subjects. It's an interesting illustration, however one should keep in mind that it's vulnerable to device-to-device variation as well as missing data:
End of explanation
"""
try:
db.remove(db.arrays.IHI_BINNED_FEATURES)
except:
print("Array not found")
feature_binning_period = 1 * 60 * 1000 #Break up the data into 1-minute bins
db.apply(
db.grouped_aggregate(
db.apply(
db.arrays.IHI_DATA,
"bin_millis", "mil/({p}) * ({p}) + ({p})/2".format(p=feature_binning_period)
),
"sum(light) as total_light",
"var(acc_x) as acc_x_var",
"var(acc_y) as acc_y_var",
"var(acc_z) as acc_z_var",
"max(sleep) as sleep",
"subject, day, bin_millis"
),
"training", "random()%2"
).store(db.arrays.IHI_BINNED_FEATURES)
db.op_count(db.arrays.IHI_BINNED_FEATURES)[:]
"""
Explanation: Our binned_activity function is a very rough prototype but we'll draw attention to how easy it is to modify that function - adding a filter, interpolating, taking a more realistic integral - and re-run on all the data using SciDB.
3. In-Database Machine Learning
We'll build up on the streaming paradigm seen above to execute a machine learning exercise on the data. We'll perform the following:
Compute several binned features on the data - binned variance for accelerometers and the total amount of light as measured by the light sensor
Randomly split the binned features into "training" and "validation" sets
Use the Stochastic Gradient Descent Classifier from scikit-learn to train several models on the training set inside SciDB in Parallel
Combine the trained models into a single Voting Classifier prediction model, store that as an array in SciDB.
Evaluate the model on the remaining "validation" set and compare it to ground truth.
Many of these steps are built on this blog post: http://rvernica.github.io/2017/10/streaming-machine-learning
In fact we use a very similar classifier. Consult that post for additional clarifications.
First, the binning can be done entirely using SciDB aggregation. The splitting into "training" and "validation" is achieved by apply-ing a value to each field that is either 0 or 1.
End of explanation
"""
import scidbstrm
class Train:
    """Streaming callback that incrementally fits a classifier chunk by chunk.

    scidbstrm calls `map` once per data chunk and `finalize` once at the
    end of the stream.  `model` is injected by the worker-side bootstrap
    script (an sklearn SGDClassifier per the run script below); `numpy`,
    `io`, `pandas` and `sklearn.externals` resolve inside that worker
    environment, not in this notebook process.
    """
    # Set to a fitted-in-progress estimator by the stream bootstrap.
    model = None
    # Running total of rows seen across all chunks on this instance.
    count = 0
    @staticmethod
    def map(df):
        # Partial-fit on this chunk's feature columns; [1, 2] is the fixed
        # set of class labels (1 = awake, 2 = asleep). Returning None tells
        # scidbstrm to emit no rows for this chunk.
        dft = df[['acc_x_var','acc_y_var', 'acc_z_var', 'total_light']]
        Train.model.partial_fit(numpy.matrix(dft),
                                df['sleep'],
                                [1,2])
        Train.count += len(df)
        return None
    @staticmethod
    def finalize():
        # Emit one row holding the joblib-serialized model plus the number
        # of rows it was trained on; instances that saw no data emit nothing.
        if Train.count == 0:
            return None
        buf = io.BytesIO()
        sklearn.externals.joblib.dump(Train.model, buf)
        return pandas.DataFrame({
            'count': [Train.count],
            'model': [buf.getvalue()]})
ar_fun = db.input(upload_data=scidbstrm.pack_func(Train),
upload_schema=scidbpy.Schema.fromstring('<x:binary not null>[i]')).store()
#Once again, don't forget our environment variables:
python_run = """'
python3 -uc "
import io
import numpy
import pandas
import scidbstrm
import sklearn.externals
import sklearn.linear_model
Train = scidbstrm.read_func()
Train.model = sklearn.linear_model.SGDClassifier()
scidbstrm.map(Train.map, Train.finalize)
"'"""
que = db.stream(
db.filter(
db.arrays.IHI_BINNED_FEATURES,
#Note: computed variance can be NULL if a bin input segment (1 minute) has only a single value in it
"training=1 and acc_x_var is not null and acc_y_var is not null and acc_z_var is not null"
),
python_run,
"'format=feather'",
"'types=int64,binary'",
"'names=count,model'",
'_sg({}, 0)'.format(ar_fun.name)
).store(
db.arrays.IHI_PARTIAL_MODEL)
"""
Explanation: 3.1 Training the Partial Models
Note the binned dataset is smaller than the original and it's surely possible to download it. Here we'll illustrate an in-DB parallel approach that will scale well for hundreds of such subjects. Note the use of filter with training=1 which will use only the "training" half of the data.
We train the models in parallel:
End of explanation
"""
db.scan(db.arrays.IHI_PARTIAL_MODEL)[:]
"""
Explanation: For each instance that had binned data there's now a model decorated with the number of rows that it was trained on:
End of explanation
"""
scidbstrm.python_map
def merge_models(df):
    """Merge per-instance partial models into a single VotingClassifier.

    df carries one row per SciDB instance with a 'model' column of
    joblib-serialized estimators and a 'count' column of training-row
    counts. Returns a one-row DataFrame with the merged model and the
    total count, or None when df contains no models.
    """
    import io
    import pandas
    import sklearn.ensemble
    import sklearn.externals
    # Bug fix: LabelEncoder lives in sklearn.preprocessing, which was only
    # reachable via a transitive import from sklearn.ensemble; import it
    # explicitly so the function does not depend on sklearn internals.
    import sklearn.preprocessing
    estimators = [sklearn.externals.joblib.load(io.BytesIO(byt))
                  for byt in df['model']]
    if not estimators:
        return None
    labelencoder = sklearn.preprocessing.LabelEncoder()
    labelencoder.fit([0,1,2])
    # The VotingClassifier is constructed empty and populated by hand so the
    # already-fitted partial models are reused without re-training.
    model = sklearn.ensemble.VotingClassifier(())
    model.estimators_ = estimators
    model.le_ = labelencoder
    buf = io.BytesIO()
    sklearn.externals.joblib.dump(model, buf)
    return pandas.DataFrame({'count': df.sum()['count'],
                             'model': [buf.getvalue()]})
ar_fun = db.input(upload_data=scidbstrm.pack_func(merge_models),
upload_schema=scidbpy.Schema.fromstring('<x:binary not null>[i]')).store()
que = db.unpack(
#The unpack puts all the models into a single chunk (assuming there aren't more than 1M instances)
db.arrays.IHI_PARTIAL_MODEL,
"i",
"10000000"
).stream(
scidbstrm.python_map,
"'format=feather'",
"'types=int64,binary'",
"'names=count,model'",
'_sg({}, 0)'.format(ar_fun.name)
).store(
db.arrays.IHI_FINAL_MODEL)
db.scan(db.arrays.IHI_FINAL_MODEL)[:]
"""
Explanation: 3.2 Combining the Models
In a fashion very similar to Dr. Vernica's blog post, we combine all the partially-trained models:
End of explanation
"""
try:
db.remove(db.arrays.IHI_PREDICTED_SLEEP)
except:
print("Array not found")
class Predict:
    """Streaming callback that scores feature chunks with the merged model.

    `model` is loaded from the IHI_FINAL_MODEL array by the worker-side
    bootstrap script; `numpy` resolves inside that worker environment,
    not in this notebook process.
    """
    # Set to the merged VotingClassifier by the stream bootstrap.
    model = None
    @staticmethod
    def map(df):
        # Predict sleep state (1 = awake, 2 = asleep) from this chunk's
        # binned features and return the labelled rows downstream.
        dfp = numpy.matrix(df[['acc_x_var','acc_y_var', 'acc_z_var', 'total_light']])
        #We're creating a new column; Arrow will complain if it's not Unicode:
        df[u'pred'] = Predict.model.predict(dfp)
        df = df [['subject', 'day', 'bin_millis', 'sleep', 'pred']]
        return df
ar_fun = db.input(
upload_data=scidbstrm.pack_func(Predict),
upload_schema=scidbpy.Schema.fromstring('<x:binary not null>[i]')
).cross_join(
db.arrays.IHI_FINAL_MODEL
).store()
python_run = """'
python3 -uc "
import dill
import io
import numpy
import scidbstrm
import sklearn.externals
df = scidbstrm.read()
Predict = dill.loads(df.iloc[0, 0])
Predict.model = sklearn.externals.joblib.load(io.BytesIO(df.iloc[0, 2]))
scidbstrm.write()
scidbstrm.map(Predict.map)
"'"""
que = db.filter(
db.arrays.IHI_BINNED_FEATURES,
"training = 0 and acc_x_var is not null and acc_y_var is not null and acc_z_var is not null"
).stream(
python_run,
"'format=feather'",
"'types=int64,int64,int64,double,int64'",
"'names=subject,day,bin_millis,sleep,prediction'",
'_sg({}, 0)'.format(ar_fun.name)
).store(
db.arrays.IHI_PREDICTED_SLEEP)
"""
Explanation: 3.3 Making Predictions
Now that we have our model, we can use it to make predictions. Below we'll run it on the remainder of the data, filtering for training = 0.
End of explanation
"""
s4d6 = db.filter(db.arrays.IHI_PREDICTED_SLEEP, 'subject=1 and day=8').fetch(atts_only=True)
s4d6 = s4d6.sort_values(by='bin_millis')
s4d6['hour'] = s4d6['bin_millis'] / 3600000
plt.rcParams['figure.figsize'] = (18, 2)
s4d6[['hour','sleep']].plot(x='hour', title = "Sleep (Actual)")
s4d6[['hour','prediction']].plot(x='hour', color='green', title = "Sleep (Predicted)")
plt.show(block=True)
"""
Explanation: 3.4 How did we do?
We can pull out and view the predictions for one subject-day like so. Turns out we're correct most of the time, but there some mis-labels:
End of explanation
"""
result = db.grouped_aggregate(db.arrays.IHI_PREDICTED_SLEEP, "count(*)", "sleep, prediction").fetch(atts_only=True)
result
"""
Explanation: And we can look at every 1-minute bin we have predictions for and compare our predictions to ground truth:
End of explanation
"""
result = db.project(db.arrays.IHI_PREDICTED_SLEEP, "sleep, prediction")[:]
import matplotlib, numpy
def rand_jitter(arr):
    """Add small Gaussian noise (sigma 0.2) so overlapping points become visible."""
    noise = numpy.random.randn(len(arr)) * .2
    return arr + noise
plt.rcParams['figure.figsize'] = (8, 8)
matplotlib.pyplot.xticks([1,2])
matplotlib.pyplot.yticks([1,2])
matplotlib.pyplot.xlabel('Sleep (Actual)')
matplotlib.pyplot.ylabel('Sleep (Predicted)')
matplotlib.pyplot.plot(
rand_jitter(result['sleep']), rand_jitter(result['prediction']), '.', ms=1)
plt.show()
"""
Explanation: The vast majority of our predictions are accurate but there's room to improve the model. Below is a visualization of the above table. We use randomized jitter to help visualize the relative number of points in each bin:
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_3d_to_2d.ipynb | bsd-3-clause | # Authors: Christopher Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
from scipy.io import loadmat
import numpy as np
from mayavi import mlab
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage # noqa
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
path_data = mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat'
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
"""
Explanation: ====================================================
How to convert 3D electrode positions to a 2D image.
====================================================
Sometimes we want to convert a 3D representation of electrodes into a 2D
image. For example, if we are using electrocorticography it is common to
create scatterplots on top of a brain, with each point representing an
electrode.
In this example, we'll show two ways of doing this in MNE-Python. First,
if we have the 3D locations of each electrode then we can use Mayavi to
take a snapshot of a view of the brain. If we do not have these 3D locations,
and only have a 2D image of the electrodes on the brain, we can use the
:class:mne.viz.ClickableImage class to choose our own electrode positions
on the image.
End of explanation
"""
mat = loadmat(path_data)
ch_names = mat['ch_names'].tolist()
elec = mat['elec']
dig_ch_pos = dict(zip(ch_names, elec))
mon = mne.channels.DigMontage(dig_ch_pos=dig_ch_pos)
info = mne.create_info(ch_names, 1000., 'ecog', montage=mon)
print('Created %s channel positions' % len(ch_names))
"""
Explanation: Load data
First we'll load a sample ECoG dataset which we'll use for generating
a 2D snapshot.
End of explanation
"""
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces=['pial'], meg=False)
mlab.view(200, 70)
xy, im = snapshot_brain_montage(fig, mon)
# Convert from a dictionary to array to plot
xy_pts = np.vstack(xy[ch] for ch in info['ch_names'])
# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])
# This allows us to use matplotlib to create arbitrary 2d scatterplots
fig2, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
# fig2.savefig('./brain.png', bbox_inches='tight') # For ClickableImage
"""
Explanation: Project 3D electrodes to a 2D snapshot
Because we have the 3D location of each electrode, we can use the
:func:mne.viz.snapshot_brain_montage function to return a 2D image along
with the electrode positions on that image. We use this in conjunction with
:func:mne.viz.plot_alignment, which visualizes electrode positions.
End of explanation
"""
# This code opens the image so you can click on it. Commented out
# because we've stored the clicks as a layout file already.
# # The click coordinates are stored as a list of tuples
# im = plt.imread('./brain.png')
# click = ClickableImage(im)
# click.plot_clicks()
# # Generate a layout from our clicks and normalize by the image
# print('Generating and saving layout...')
# lt = click.to_layout()
# lt.save(op.join(layout_path, layout_name)) # To save if we want
# # We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
x = lt.pos[:, 0] * float(im.shape[1])
y = (1 - lt.pos[:, 1]) * float(im.shape[0]) # Flip the y-position
fig, ax = plt.subplots()
ax.imshow(im)
ax.scatter(x, y, s=120, color='r')
plt.autoscale(tight=True)
ax.set_axis_off()
plt.show()
"""
Explanation: Manually creating 2D electrode positions
If we don't have the 3D electrode positions then we can still create a
2D representation of the electrodes. Assuming that you can see the electrodes
on the 2D image, we can use :class:mne.viz.ClickableImage to open the image
interactively. You can click points on the image and the x/y coordinate will
be stored.
We'll open an image file, then use ClickableImage to
return 2D locations of mouse clicks (or load a file already created).
Then, we'll return these xy positions as a layout for use with plotting topo
maps.
End of explanation
"""
|
davicsilva/dsintensive | notebooks/WorldUniversityRankings.ipynb | apache-2.0 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
## CWUR 2016 dataset
datacwur2016 = 'data/cwur2016.csv'
cwur2016 = pd.read_csv(datacwur2016)
"""
Explanation: World University Rankings
We can find, at least, three global university rankings with different methodologies to classify the better schools:
The Times Higher Education World University Ranking: https://www.timeshighereducation.com/world-university-rankings
The Academic Ranking of World Universities: http://www.shanghairanking.com/
The Center for World University Rankings: http://cwur.org/
1. The Times Higher Education World University Ranking (Times)
The Times World University Ranking:
| Year | Link (website) |
| ---- | :------ |
| 2018 | https://www.timeshighereducation.com/world-university-rankings/2018/world-ranking#!/page/0/length/-1/sort_by/rank/sort_order/asc/cols/stats |
| 2017 | https://www.timeshighereducation.com/world-university-rankings/2017/world-ranking#!/page/0/length/-1/sort_by/rank/sort_order/asc/cols/stats |
| 2016 | |
| 2015 | |
| 2014 | |
| 2013 | |
| 2012 | |
2. The Academic Ranking of World Universities (China)
The China World University Ranking:
| Year | Link (website) |
| :---- | :-------------- |
| 2017 | http://www.shanghairanking.com/ARWU2017.html |
| 2016 | http://www.shanghairanking.com/ARWU2016.html |
| 2015 | http://www.shanghairanking.com/ARWU2015.html |
| 2014 | http://www.shanghairanking.com/ARWU2014.html |
| 2013 | http://www.shanghairanking.com/ARWU2013.html |
| 2012 | http://www.shanghairanking.com/ARWU2012.html |
3. The Center for World University Rankings (CWUR)
The CWUR World University Ranking:
| Year | Link (website) |
| :---- | :-------------- |
| 2016 | http://cwur.org/2016.php |
| 2015 | http://cwur.org/2015.php |
| 2014 | http://cwur.org/2014.php |
| 2013 | http://cwur.org/2013.php |
| 2012 | http://cwur.org/2012.php |
3.1 - CWUR (2016 dataset)
2016 ranking
End of explanation
"""
cwur2016.head(10)
"""
Explanation: Top 10 universities
End of explanation
"""
cwur2016.groupby('Location').count().sort_values(by=['World Rank'], ascending=[False]).head(5)
cwur2016['Location'].value_counts()
cwur2016.loc[cwur2016['Location'] == 'USA']['Location'].count()
"""
Explanation: Ranking by country (number of instituitions)
End of explanation
"""
cwur2016.loc[cwur2016['Location'] == 'Brazil']['Institution']
cwur2016.loc[cwur2016['Location'] == 'Brazil']['Institution'].count()
"""
Explanation: Brazil's universities
End of explanation
"""
# Defining a specific Data Frame to plot
cwur_graph = cwur2016['Location'].value_counts()
"""
Explanation: Ploting World University Ranking / Country (CWUR, 2016)
End of explanation
"""
|
RuthAngus/LSST-max | code/LSST_inject_and_recover.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
from gatspy.periodic import LombScargle
import sys
%matplotlib inline
from toy_simulator import simulate_LSST
from trilegal_models import random_stars
import simple_gyro as sg
import pandas as pd
"""
Explanation: Recovering rotation periods in simulated LSST data
End of explanation
"""
fname = "output574523944248.dat"
N = 100
logAges, bvs, logTeff, rmag = random_stars(fname, N)
teff = 10**logTeff
"""
Explanation: Randomly select targets from a TRILEGAL output.
End of explanation
"""
m = bvs > .4 # select only cool stars
cool_ages = 10**logAges[m] * 1e-9
cool_ps = sg.period(cool_ages, bvs[m])
cool_teffs = teff[m]
cool_rmags = rmag[m]
"""
Explanation: Calculate periods from ages and colours for cool stars
End of explanation
"""
hot_ages = 10**logAges[~m] * 1e-9 # select hot stars
hot_teffs = teff[~m]
hot_rmags = rmag[~m]
# copy parameters for two Gaussians from hot_stars ipython notebook
A1, A2, mu1, mu2, sig1, sig2 = 254.11651209, 49.8149765, 3.00751724, 3.73399554, 2.26525979, 8.31739725
hot_ps = np.zeros_like(hot_ages)
hot_ps1 = np.random.randn(int(len(hot_ages)*(1 - A2/A1)))*sig1 + mu1 # mode 1
hot_ps2 = np.random.randn(int(len(hot_ages)*(A2/A1)))*sig2 + mu2 # mode 2
hot_ps[:len(hot_ps1)] = hot_ps1
hot_ps[len(hot_ps1):len(hot_ps2)] = hot_ps2
tot = len(hot_ps1) + len(hot_ps2)
hot_ps[tot:] = np.random.randn(len(hot_ps)-tot)*sig2 + mu2 # make up the total number of Ps
# combine the modes
age = np.concatenate((cool_ages, hot_ages))
ps = np.concatenate((cool_ps, hot_ps))
teff = np.concatenate((cool_teffs, hot_teffs))
rmag = np.concatenate((cool_rmags, hot_rmags))
"""
Explanation: Draw from a sum of two Gaussians (modelled in another notebook) that describes the period distribution for hot stars. Approximations: I have lumped all stars with colour < 0.4 in together AND I actually used teff = 6250, not B-V = 0.4 in the other notebook.
End of explanation
"""
plt.hist(age)
plt.xlabel("Age (Gyr)")
plt.hist(ps)
plt.xlabel("Period (days)")
plt.hist(rmag)
plt.xlabel("r mag")
## Arrays of random (log-normal) periods and (uniform) amplitudes.
#min_period, max_period = 1, 100 # days
#ps = np.exp(np.random.uniform(np.log(min_period), np.log(max_period), N)) # periods
#amps = np.random.uniform(10, 300, N) # ppm
"""
Explanation: Make histograms of the ages and periods
End of explanation
"""
# Column headings: log10P, log10R, stdR, Nbin
teff_bins = [3500, 4000, 4500, 5000, 5500, 6000]
d35 = pd.read_csv("data/rot_v_act3500.txt")
d40 = pd.read_csv("data/rot_v_act4000.txt")
d45 = pd.read_csv("data/rot_v_act4500.txt")
d50 = pd.read_csv("data/rot_v_act5000.txt")
d55 = pd.read_csv("data/rot_v_act5500.txt")
d60 = pd.read_csv("data/rot_v_act6000.txt")
plt.step(d35["log10P"], d35["log10R"], label="T=3500")
plt.step(d40["log10P"], d40["log10R"], label="T=4000")
plt.step(d45["log10P"], d45["log10R"], label="T=4500")
plt.step(d50["log10P"], d50["log10R"], label="T=5000")
plt.step(d55["log10P"], d55["log10R"], label="T=5500")
plt.step(d60["log10P"], d60["log10R"], label="T=6000")
plt.legend()
plt.xlabel("log Period")
plt.ylabel("log Range")
"""
Explanation: Use Derek's results to calculate amplitudes
End of explanation
"""
def find_nearest (array, value):
    """
    Match a period to a bin.
    array: array of bin heights.
    value: the period of the star.
    Returns the value and index of the bin.
    """
    distances = np.abs(array - value)
    # Boolean mask of the closest bin(s); ties return every tied bin.
    mask = distances == distances.min()
    return array[mask], mask
def assign_amps(ps, log10P, log10R, stdR):
    """
    Take periods and bin values and return an array of amplitudes.
    """
    # find_nearest returns a (matched bin value(s), boolean mask) pair, so
    # stacking gives an object array whose second column holds the masks.
    npi = np.array([find_nearest(10**log10P, p) for p in ps]) # match periods to bins
    nearest_ps, inds = npi[:, 0], npi[:, 1]
    # Each i is a boolean mask over the bins; [:, 0] keeps the first match
    # (only relevant when a period ties between two bins).
    log_ranges = np.array([log10R[i] for i in inds])[:, 0] # array of ranges for each *
    std_ranges = np.array([stdR[i] for i in inds])[:, 0] # array of stdevs in range for each *
    # One amplitude per star, drawn from a Gaussian centred on the bin's
    # log-range with the bin's stdev.
    return np.random.randn(len(ps))*std_ranges + log_ranges # draw amps from Gaussians
def make_arrays(data, temp_bin):
    """
    Amplitude arrays for each temperature bin
    """
    log_p = np.array(data["log10P"])
    log_r = np.array(data["log10R"])
    std_r = np.array(data["stdR"])
    if temp_bin == 3500:
        selected = teff < 3750
    elif temp_bin == 6000:
        # NOTE(review): stars between 5750 and 6000 K fall in no bin at
        # all with this rule -- confirm that is intended.
        selected = teff > 6000
    else:
        selected = (temp_bin - 250 < teff) * (teff < temp_bin + 250)
    periods, teffs, rmags = ps[selected], teff[selected], rmag[selected]
    amplitudes = assign_amps(periods, log_p, log_r, std_r)
    return periods, amplitudes, teffs, rmags
def LSST_sig(m):
    """
    Approximate the noise in figure 2 of arxiv:1603.06638 from the apparent r-mag.
    Bright stars (m < 19) are floored at 5 mmag; fainter stars snap to the
    nearest tabulated magnitude (ties resolve to the brighter entry).
    Returns the noise in magnitudes.
    """
    if m < 19:
        return .005
    table_mags = np.array([19, 20, 21, 22, 23, 24, 25])
    table_sigs = np.array([.005, .007, .01, .02, .03, .1, .2])
    offsets = np.abs(table_mags - m)
    return table_sigs[offsets == offsets.min()][0]
pers, logamps, teffs, rmags = np.concatenate((make_arrays(d35, 3500), make_arrays(d40, 4000),
make_arrays(d45, 4500), make_arrays(d50, 5000),
make_arrays(d55, 5500), make_arrays(d60, 6000)), axis=1)
amps = 10**logamps # parts per million
noise = LSST_sig(rmag[0])
noises_mag = np.array([LSST_sig(mag) for mag in rmags])
noises_ppm = (1 - 10**(-noises_mag/2.5)) * 1e6
"""
Explanation: Assign amplitudes
End of explanation
"""
%%capture
# amps = np.random.uniform(10, 300, N) # ppm
path = "simulations" # where to save the lcs
[simulate_LSST(i, pers[i], amps[i], path, noises_ppm[i]) for i in range(len(pers))] # simulations
# save the true values
ids = np.arange(len(pers))
data = np.vstack((ids, pers, amps))
np.savetxt("{0}/truth.txt".format(path), data.T)
"""
Explanation: Simulate light curves
End of explanation
"""
id = 0
sid = str(int(id)).zfill(4)
path = "results" # where to save results
x, y, yerr = np.genfromtxt("simulations/{0}.txt".format(sid)).T # load a fake light curve
plt.errorbar(x, y, yerr=yerr, fmt="k.", capsize=0)
"""
Explanation: Load and plot an example light curve
End of explanation
"""
ps = np.linspace(2, 100, 1000) # the period array (in days)
model = LombScargle().fit(x, y, yerr)
pgram = model.periodogram(ps)
# find peaks
peaks = np.array([i for i in range(1, len(ps)-1) if pgram[i-1] < pgram[i] and pgram[i+1] < pgram[i]])
if len(peaks):
period = ps[pgram==max(pgram[peaks])][0]
else: period = 0
plt.plot(ps, pgram) # plot the pgram
plt.axvline(period, color="r") # plot the position of the highest peak
# load and plot the truth
ids, true_ps, true_as = np.genfromtxt("simulations/truth.txt").T
plt.axvline(true_ps[id], color="g") # plot the position of the highest peak
print(period, true_ps[id])
"""
Explanation: Compute a periodogram
End of explanation
"""
ids = np.arange(len(pers))
periods = np.zeros_like(ids)
for i, id in enumerate(ids):
sid = str(int(id)).zfill(4)
x, y, yerr = np.genfromtxt("simulations/{0}.txt".format(sid)).T # load a fake light curve
model = LombScargle().fit(x, y, yerr) # compute pgram
pgram = model.periodogram(ps)
# find peaks
peaks = np.array([i for i in range(1, len(ps)-1) if pgram[i-1] < pgram[i] and pgram[i+1] < pgram[i]])
if len(peaks):
period = ps[pgram==max(pgram[peaks])][0]
else: period = 0
periods[i] = period
"""
Explanation: Now compute LS pgrams for a set of LSST light curves and save the highest peak .
End of explanation
"""
data = np.vstack((true_ps, periods, teffs, rmags, true_as, noises_ppm))
np.savetxt("rotation_results{0}.txt".format(fname), data.T)
"""
Explanation: Save the data
End of explanation
"""
plt.plot(true_ps, periods, "k.")
xs = np.linspace(min(true_ps), max(true_ps), 100)
plt.plot(xs, xs, "r")
tau = .1 # the recovery must be within a factor of *threshold* of the truth
plt.plot(xs, xs-tau*xs, "r--")
plt.plot(xs, xs+tau*xs, "r--")
"""
Explanation: Plot the recovered periods vs the true periods.
End of explanation
"""
m = (true_ps - tau*true_ps < periods) * (periods < true_ps + tau*true_ps)
plt.hist(true_ps, 15, color="b", label="all")
plt.hist(true_ps[m], 15, color="r", alpha=.5, label="recovered")
plt.legend(loc="best")
print(len(true_ps), "injected", len(true_ps[m]), "recovered")
print(len(true_ps[m])/len(true_ps)*100, "percent success")
"""
Explanation: Decide whether the recovery was successful or not
End of explanation
"""
|
maxkleiner/maXbox4 | ARIMA_Predictor21.ipynb | gpl-3.0 | #sign:max: MAXBOX8: 03/02/2021 18:34:41
# optimal moving average OMA for market index signals ARIMA study- Max Kleiner
# v2 shell argument forecast days - 4 lines compare - ^GDAXI for DAX
# pip install pandas-datareader
# C:\maXbox\mX46210\DataScience\princeton\AB_NYC_2019.csv AB_NYC_2019.csv
#https://medium.com/abzuai/the-qlattice-a-new-machine-learning-model-you-didnt-know-you-needed-c2e037878cd
#https://www.kaggle.com/dgomonov/data-exploration-on-nyc-airbnb 41
#https://www.kaggle.com/duygut/airbnb-nyc-price-prediction
#https://www.machinelearningplus.com/time-series/arima-model-time-series-forecasting-python/
import numpy as np
import matplotlib.pyplot as plt
import sys
import numpy as np, pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import adfuller, acf
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.figsize':(9,7), 'figure.dpi':120})
# Import data
wwwus = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/wwwusage.csv', names=['value'], header=0)
import pandas as pd
# Accuracy metrics
def forecast_accuracy(forecast, actual):
    """Compute accuracy metrics comparing a forecast to the actual values.

    Parameters
    ----------
    forecast, actual : 1-D numpy arrays of equal length.

    Returns
    -------
    dict with keys 'mape', 'me', 'mae', 'mpe', 'rmse', 'acf1', 'corr',
    'minmax'.
    """
    err = forecast - actual                          # pointwise forecast error
    mape = np.mean(np.abs(err) / np.abs(actual))     # Mean Absolute Percentage Error
    me = np.mean(err)                                # Mean Error
    mae = np.mean(np.abs(err))                       # Mean Absolute Error
    mpe = np.mean(err / actual)                      # Mean Percentage Error
    rmse = np.mean(err**2)**.5                       # Root Mean Squared Error
    corr = np.corrcoef(forecast, actual)[0, 1]       # Pearson correlation
    stacked = np.hstack([forecast[:, None], actual[:, None]])
    mins = np.amin(stacked, axis=1)
    maxs = np.amax(stacked, axis=1)
    minmax = 1 - np.mean(mins / maxs)                # Min-Max error
    # BUG FIX: the original computed ``acf(fc-test)`` using the *globals*
    # ``fc`` and ``test`` instead of the function arguments, so the function
    # only worked when called after those globals happened to exist.
    acf1 = acf(err)[1]                               # lag-1 autocorrelation of the error
    return {'mape': mape, 'me': me, 'mae': mae,
            'mpe': mpe, 'rmse': rmse, 'acf1': acf1,
            'corr': corr, 'minmax': minmax}
# Alternative local data file (kept for reference, disabled):
#wwwus = pd.read_csv(r'C:\maXbox\mX46210\DataScience\princeton\1022dataset.txt', \
#                     names=['value'], header=0)
print(wwwus.head(10).T)  # transposed for a compact column overview
#1. How to find the order of differencing (d) in ARIMA model
# Augmented Dickey-Fuller test; per the notes below, a p-value > 0.05 means we
# cannot reject the non-stationarity null hypothesis, so differencing is needed.
result = adfuller(wwwus.value.dropna())
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
#"""
# Original Series
fig, axes = plt.subplots(3, 2, sharex=True)
axes[0, 0].plot(wwwus.value); axes[0, 0].set_title('Orig Series')
plot_acf(wwwus.value, ax=axes[0, 1], lags=60)
# 1st Differencing
axes[1, 0].plot(wwwus.value.diff()); axes[1, 0].set_title('1st Order Differencing')
plot_acf(wwwus.value.diff().dropna(), ax=axes[1, 1], lags=60)
# 2nd Differencing
axes[2, 0].plot(wwwus.value.diff().diff()); axes[2, 0].set_title('2nd Order Differencing')
plot_acf(wwwus.value.diff().diff().dropna(), ax=axes[2, 1], lags=60)
plt.show()
#2. How to find the order of the AR term (p)
# PACF of the once-differenced series guides the choice of p.
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)
axes[0].plot(wwwus.value.diff()); axes[0].set_title('1st Differencing')
axes[1].set(ylim=(0,5))
plot_pacf(wwwus.value.diff().dropna(), ax=axes[1], lags=100)
plt.show()
#3. How to find the order of the MA term (q)
# ACF of the once-differenced series guides the choice of q.
fig, axes = plt.subplots(1, 2, sharex=True)
axes[0].plot(wwwus.value.diff()); axes[0].set_title('1st Differencing')
axes[1].set(ylim=(0,1.2))
plot_acf(wwwus.value.diff().dropna(), ax=axes[1] , lags=60)
plt.show()
#"""
#4. How to build the ARIMA Model
# Fit ARIMA with (p=1, d=1, q=2) on the full series.
model = ARIMA(wwwus.value, order=(1,1,2))
model_fit = model.fit(disp=0)
print('first fit ',model_fit.summary())
"""
# Plot residual errors
residuals = pd.DataFrame(model_fit.resid)
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, ax = plt.subplots(1,2)
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind='kde', title='Density', ax=ax[1])
plt.show()
"""
#5. Plot Predict Actual vs Fitted
# When you set dynamic=False in-sample lagged values are used for prediction.
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
model_fit.plot_predict(dynamic=False)
plt.show()
#That is, the model gets trained up until the previous value to make next prediction. This can make a fitted forecast and actuals look artificially good.
# Now Create Training and Test
# Hold out everything from index 80 onward for out-of-sample validation.
train = wwwus.value[:80]
test = wwwus.value[80:]
#model = ARIMA(train, order=(3, 2, 1))
model = ARIMA(train, order=(2, 2, 3))
fitted = model.fit(disp=-1)
print('second fit ',fitted.summary())
# Forecast 20 steps ahead with a 95% confidence interval.
fc,se,conf = fitted.forecast(20, alpha=0.05)  # 95% conf
# Make as pandas series (aligned to the test index for plotting)
fc_series = pd.Series(fc, index=test.index)
lower_series = pd.Series(conf[:,0], index=test.index)
upper_series = pd.Series(conf[:,1], index=test.index)
# Plot
plt.figure(figsize=(12,5), dpi=100)
plt.plot(train, label='training')
plt.plot(test, label='actual')
plt.plot(fc_series, label='forecast')
plt.fill_between(lower_series.index, lower_series, upper_series,
                 color='k', alpha=.15)  # shaded 95% confidence band
plt.title('maXbox4 Forecast vs Actuals ARIMA')
plt.legend(loc='upper left', fontsize=8)
plt.show()
print(forecast_accuracy(fc, test.values))
print('Around 5% MAPE implies a model is about 95% accurate in predicting next 20 observations.')
"""
Explanation: <a href="https://colab.research.google.com/github/maxkleiner/maXbox4/blob/master/ARIMA_Predictor21.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
How to find the order of (p,d,q) in ARIMA timeseries model
A time series is a sequence where a metric is recorded over regular time intervals.
Inspired by
https://www.machinelearningplus.com/time-series/arima-model-time-series-forecasting-python/
End of explanation
"""
#1. How to find the order of differencing (d) in ARIMA model
# Re-run of the ADF stationarity test (duplicate cell from the notebook export).
result = adfuller(wwwus.value.dropna())
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
"""
Explanation: Step by Step Code Order
#1. How to find the order of differencing (d) in ARIMA model
p is the order of the AR term
q is the order of the MA term
d is the number of differencing required to make the time series stationary as I term
End of explanation
"""
# Original Series
# Duplicate of the differencing-diagnostics cell above (notebook export artifact).
fig, axes = plt.subplots(3, 2, sharex=True)
axes[0, 0].plot(wwwus.value); axes[0, 0].set_title('Orig Series')
plot_acf(wwwus.value, ax=axes[0, 1], lags=60)
# 1st Differencing
axes[1, 0].plot(wwwus.value.diff()); axes[1, 0].set_title('1st Order Differencing')
plot_acf(wwwus.value.diff().dropna(), ax=axes[1, 1], lags=60)
# 2nd Differencing
axes[2, 0].plot(wwwus.value.diff().diff()); axes[2, 0].set_title('2nd Order Differencing')
plot_acf(wwwus.value.diff().diff().dropna(), ax=axes[2, 1], lags=60)
plt.show()
"""
Explanation: A p-value less than 0.05 (typically ≤ 0.05) is statistically significant. It indicates strong evidence against the null hypothesis, as there is less than a 5% probability that the null hypothesis is correct (i.e. that the result arose by chance). Therefore, we reject the null hypothesis and accept the alternative hypothesis (there is correlation).
Our time series result is not significant — why?
Example:
ADF Statistic: -2.464240
p-value: 0.124419
Null hypothesis: the series is non-stationary.
0.12 > 0.05 -> not significant, therefore we cannot reject the null hypothesis, so our time series is non-stationary and we have to difference it to make it stationary.
The purpose of differencing is to make the time series stationary.
End of explanation
"""
# PACF plot used to pick the AR order p (duplicate of the cell above).
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)
axes[0].plot(wwwus.value.diff()); axes[0].set_title('1st Differencing')
axes[1].set(ylim=(0,5))
plot_pacf(wwwus.value.diff().dropna(), ax=axes[1], lags=100)
plt.show()
"""
Explanation: For the above series, the time series reaches stationarity with two orders of differencing. But to begin with we use 1 order of differencing as a conservative choice. Let me explain that:
D>2 is not allowed in statsmodels.tsa.arima_model!
Because d>2 is not allowed, our best bet is to start simple and check whether integrating once grants stationarity. If so, we can fit a simple ARIMA model and examine the ACF of the residual values to get a better feel for what orders of differencing to use. There is also a drawback: if we integrate more than two times (d>2), we lose n observations, one for each integration. And one of the most common errors in ARIMA modeling is to "overdifference" the series and end up adding extra AR or MA terms to undo the forecast damage, so the author (I assume) decided to raise this exception.
#2. How to find the order of the AR term (p)
End of explanation
"""
# ACF plot used to pick the MA order q.
# NOTE(review): this duplicate uses lags=90 whereas the earlier cell uses
# lags=60 — cosmetic divergence between the two copies, left as-is.
fig, axes = plt.subplots(1, 2, sharex=True)
axes[0].plot(wwwus.value.diff()); axes[0].set_title('1st Differencing')
axes[1].set(ylim=(0,1.2))
plot_acf(wwwus.value.diff().dropna(), ax=axes[1] , lags=90)
plt.show()
"""
Explanation: #3. How to find the order of the MA term (q)
End of explanation
"""
# Fit ARIMA(1,1,2) on the full series (duplicate of step 4 above).
model = ARIMA(wwwus.value, order=(1,1,2))
model_fit = model.fit(disp=0)
print('first fit ',model_fit.summary())
"""
Explanation: 4. How to build the ARIMA Model
End of explanation
"""
# Residual diagnostics: line plot plus kernel-density estimate of the residuals.
residuals = pd.DataFrame(model_fit.resid)
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, ax = plt.subplots(1,2)
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind='kde', title='Density', ax=ax[1])
plt.show()
"""
Explanation: Notice here that the coefficient of the MA2 term is close to zero (-0.0010) and the p-value in the ‘P>|z|’ column is highly insignificant (0.9). Ideally the p-value should be well below 0.05 for the respective term to be significant.
5. Plot residual errors
Let’s plot the residuals to ensure there are no patterns (that is, look for constant mean and variance).
End of explanation
"""
# In-sample one-step-ahead predictions overlaid on the actual series.
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
model_fit.plot_predict(dynamic=False)
plt.show()
"""
Explanation: 6. Plot Predict Actual vs Fitted
When you set dynamic=False in-sample lagged values are used for prediction.
That is, the model gets trained up until the previous values to make next prediction. This can make a fitted forecast and actuals look artificially good.
End of explanation
"""
# Train/test split and 20-step out-of-sample forecast (duplicate of the cell above).
train = wwwus.value[:80]
test = wwwus.value[80:]
#model = ARIMA(train, order=(3, 2, 1))
model = ARIMA(train, order=(2, 2, 3))
fitted = model.fit(disp=-1)
print('second fit ',fitted.summary())
# Forecast
fc,se,conf = fitted.forecast(20, alpha=0.05)  # 95% conf
# Make as pandas series
fc_series = pd.Series(fc, index=test.index)
lower_series = pd.Series(conf[:,0], index=test.index)
upper_series = pd.Series(conf[:,1], index=test.index)
# Plot
plt.figure(figsize=(12,5), dpi=100)
plt.plot(train, label='training')
plt.plot(test, label='actual')
plt.plot(fc_series, label='forecast')
plt.fill_between(lower_series.index, lower_series, upper_series,
                 color='k', alpha=.15)  # shaded 95% confidence band
plt.title('maXbox4 Forecast vs Actuals ARIMA')
plt.legend(loc='upper left', fontsize=8)
plt.show()
"""
Explanation: 7. Now Create Training and Test Validation
We can see that ARIMA is adequately forecasting the seasonal pattern in the series. In terms of model performance, we compare the RMSE (root mean squared error) and the MFE (mean forecast error), and also prefer the model with the lowest BIC.
End of explanation
"""
# Report accuracy metrics of the 20-step forecast against the held-out data.
print(forecast_accuracy(fc, test.values))
print('Around 5% MAPE implies a model is about 95% accurate in predicting next 20 observations.')
"""
Explanation: 8. Some scores and performance
The 20 observations depends on the train/test set fc,se,conf = fitted.forecast(20, alpha=0.05) # 95% conf
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cas/cmip6/models/sandbox-3/land.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# Output document for the CMIP6 / CAS / SANDBOX-3 land-surface description.
DOC = NotebookOutput('cmip6', 'cas', 'sandbox-3', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: CAS
Source ID: SANDBOX-3
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:45
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dymanic vegation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmopshere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependancies on snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river soil hydrology in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General describe how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, specify the functions that snow albedo depends on*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If couple to atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupling with rivers, which quantities are exchanged between the lakes and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
coolharsh55/advent-of-code | 2016/python3/Day19.ipynb | mit | no_elves = 5
elves = [elf for elf in range(1, no_elves + 1)]
print(elves)
"""
Explanation: Day 19: An Elephant Named Joseph
author: Harshvardhan Pandit
license: MIT
link to problem statement
The Elves contact you over a highly secure emergency channel. Back at the North Pole, the Elves are busy misunderstanding White Elephant parties.
Each Elf brings a present. They all sit in a circle, numbered starting with position 1. Then, starting with the first Elf, they take turns stealing all the presents from the Elf to their left. An Elf with no presents is removed from the circle and does not take turns.
For example, with five Elves (numbered 1 to 5):
1
5 2
4 3
Elf 1 takes Elf 2's present.
Elf 2 has no presents and is skipped.
Elf 3 takes Elf 4's present.
Elf 4 has no presents and is also skipped.
Elf 5 takes Elf 1's two presents.
Neither Elf 1 nor Elf 2 have any presents, so both are skipped.
Elf 3 takes Elf 5's three presents.
So, with five Elves, the Elf that sits starting in position 3 gets all the presents.
With the number of Elves given in your puzzle input, which Elf gets all the presents?
Solution logic
At every turn, the next elf is skipped or removed from the circle. This goes on until only one elf is remaining. We will simulate the entire scenario as a naive solution before moving on to a better, optimized version.
Test Data
The test data has 5 elves, with elf 3 getting all the presents. We generate an array of 5 elves as the test input.
End of explanation
"""
def play_round(elves):
    """Simulate one full pass of present stealing.

    Each elf still holding presents steals from the elf on its left, so only
    every second elf survives the pass.  When the pass starts with an odd
    number of elves, the last survivor wraps around and steals from the first
    elf, eliminating the first survivor as well.
    """
    survivors = elves[0::2]
    if len(elves) % 2 == 1:
        # Odd count: the final elf wraps around and takes out elf at the front.
        survivors = survivors[1:]
    return survivors
"""
Explanation: Simulate one round of stealing presents
To simulate one round, we define the function play_round that takes an elf, then skips one over while iterating over the list. If the number of elves are odd, then the first elf's presents are stolen by the last elf, and therefore, we drop the first elf from the list.
End of explanation
"""
# Repeat rounds until a single elf holds all the presents.
while len(elves) > 1:
    elves = play_round(elves)
print(elves[0])  # for the 5-elf example this prints 3
"""
Explanation: Continue simulating rounds until only one elf is remaining
End of explanation
"""
with open('../inputs/day19.txt', 'r') as f:
    input_data = int(f.readline())  # puzzle input: total number of elves
elves = [elf for elf in range(1, input_data + 1)]
# Same round-by-round simulation as above, on the real input.
while len(elves) > 1:
    elves = play_round(elves)
print('answer', elves[0])
"""
Explanation: Run on the given input
End of explanation
"""
# Closed-form Josephus recurrence with skip count k = 2:
#   f(1) = 1,  f(n) = ((f(n - 1) + k - 1) mod n) + 1
n = 2
k = 2
j = [1]  # j[i] stores f(i + 1); seeded with the single-elf base case
while n <= input_data:
    # j[n - 2] is f(n - 1) in this 0-based storage.
    j.append((j[n - 2] + k - 1) % n + 1)
    n += 1
print('answer', j[-1])  # f(input_data): position of the winning elf
"""
Explanation: Optimized version - Josephus problem
The problem statement is actually quite well known as the Josephus problem which can be generalized to a counting out problem. The solution to solve the Josephus problem is given as:
f(n) = f(n - 2) + k - 1) % n + 1
Where n is the number of players, k is the skip count where k-1 people are skipped and the k-th is executed. (in this case, k = 2).
End of explanation
"""
# DO NOT RUN
def do_not_run():
    """O(n^2) brute-force simulation of part two; kept for reference only.

    Reads the module-level ``input_data``.  Each turn removes the elf across
    the circle (index len(elves)//2) and then rotates the list left by one so
    the next elf to play is always at index 0.  Far too slow for real input.
    """
    elves = [elf for elf in range(1, input_data + 1)]
    while len(elves) > 1:
        elves.pop(len(elves)//2)
        elves = elves[1:] + elves[:1]
    print(elves[0])
"""
Explanation: Part Two
Realizing the folly of their present-exchange rules, the Elves agree to instead steal presents from the Elf directly across the circle. If two Elves are across the circle, the one on the left (from the perspective of the stealer) is stolen from. The other rules remain unchanged: Elves with no presents are removed from the circle entirely, and the other elves move in slightly to keep the circle evenly spaced.
For example, with five Elves (again numbered 1 to 5):
The Elves sit in a circle; Elf 1 goes first:
1
5 2
4 3
Elves 3 and 4 are across the circle; Elf 3's present is stolen, being the one to the left. Elf 3 leaves the circle, and the rest of the Elves move in:
1 1
5 2 --> 5 2
4 - 4
Elf 2 steals from the Elf directly across the circle, Elf 5:
1 1
- 2 --> 2
4 4
Next is Elf 4 who, choosing between Elves 1 and 2, steals from Elf 1:
- 2
2 -->
4 4
Finally, Elf 2 steals from Elf 4:
2
--> 2
-
So, with five Elves, the Elf that sits starting in position 2 gets all the presents.
With the number of Elves given in your puzzle input, which Elf now gets all the presents?
Solution logic
We need to redefine our play_round function, where instead of only skipping over the next elf, we need to remove the middle elf from the list. Which one is the middle elf depends on the length of the list at that time. So at each turn (and not round), the length of the list changes. Since we are implementing the naive solution, we will iterate over the list once in the loop instead of optimizing the index access.
Since we need to eliminate the middle elf at each turn, we must find the middle elf. Let us start by seeing what the pattern is by plotting the number of elfs with the middle elf (the one to be eliminated) for each arrangement.
n 0-indexed array
1 -
2 1
3 1
4 2
5 2
6 3
7 3
8 4
9 4
We can see that the element to remove is given by n/2 where n is the total number of elves in the list at that time. Taking this into account, we must now account for the fact that the list mutates after every turn.
turn list play eliminated effective-list
0 1, 2, 3, 4, 5 - - -
1 1, 2, X, 4, 5 1 3 1, 2, 3, 4, 5
2 1, 2, X, 4, X 2 5 2, 4, 5, 1
3 X, 2, X, 4, X 4 1 4, 1, 2
4 X, 2, X, X, X 2 4 2, 4
If we compare the eliminated index and re-list such that the elf currently playing is always at the top/front/start of the array, we get the effective-list. It it, the resultant array at each turn is left-shifted by one. We exploit this to create the resultant array. Thus our strategy becomes -
remove the elf at index n/2
left the list by 1 - pop the first element and add it to the end
Continue this approach until only 1 elf is left.
Note that the simulation takes a LOT of time. Being n^2, it is very inefficient. Which is why I don't actually run it here. It'll take time. Seriously.
End of explanation
"""
from collections import deque
# Balanced "tree" as two deques: `left` holds elves 1..n//2 in order, `right`
# holds elves n..n//2+1 in reverse, so the two elves nearest the middle of the
# circle sit at the tails (right ends) of the deques.
left = deque(i for i in range(1, input_data//2 + 1))
right = deque(i for i in range(input_data, input_data//2, -1))
print('tree left->', len(left), '1:', left[0], 'last:', left[-1])
print('tree right->', len(right), '1:', right[0], 'last:', right[-1])
while left and right:
    # Eliminate the elf directly across the circle: the longer side's tail.
    if len(left) > len(right):
        left.pop()
    else:
        right.pop()
    # Rotate the circle by one so the next player is at the front of `left`.
    right.appendleft(left.popleft())
    left.append(right.pop())
"""
Explanation: Alternative logic
We use the concept of balanced trees, where we split the elves into two different lists. In one list, we add the first half elements, and in the second list, we add the other half in the reverse order, i.e. append to the front. Since the elves are always removed from the middle of the list, we can do this easily in a tree, as the middle is always the root. After removal, we need to balance the tree by moving elements from the top of the left branch (the starting elves) to the top of the right branch (the last elves), and then move the elements from the bottom of the right branch (the middle elves) to the bottom of the left branch (the middle elves).
left branch --> first half of elements, normal order
right branch --> second half of elements, reverse order
while length of tree (left + right) > 1:
if left tree has more nodes:
remove node from end of left branch
else:
remove node from end of right branch
remove node from top of left branch and append to top of right branch
remove node from bottom of right branch and append to bottom of left branch
In python, lists are great data structures, but they are not performant when it comes to removing things from the front. Since we need to remove things from both ends, we will use deque, which is a double ended queue and is performant for insertions and removals at both ends, but will perform badly for other operations such as splicing and random removals (which we do not require).
End of explanation
"""
# The sole survivor ends up in whichever deque is still non-empty.
if left:
    print('answer', left[0])
else:
    print('answer', right[0])
"""
Explanation: Since we do not know which of the left or right branches might contain the final element, we need to check which of the queues are not empty and then retrieve the answer from that.
End of explanation
"""
|
hannorein/reboundx | ipython_examples/Migration.ipynb | gpl-3.0 | import rebound
import reboundx
import numpy as np
sim = rebound.Simulation()
ainner = 1.  # inner planet semimajor axis (code units)
aouter = 10.  # outer planet semimajor axis
e0 = 0.1  # initial eccentricity, shared by both planets
inc0 = 0.1  # initial inclination, shared by both planets
sim.add(m=1.)  # central star
sim.add(m=1e-6,a=ainner,e=e0, inc=inc0)  # inner planet
sim.add(m=1e-6,a=aouter,e=e0, inc=inc0)  # outer planet
sim.move_to_com() # Moves to the center of momentum frame
ps = sim.particles  # handle: ps[0] star, ps[1] inner planet, ps[2] outer planet
"""
Explanation: Migration
For modifying orbital elements, REBOUNDx offers two implementations. modify_orbits_direct directly calculates orbital elements and modifies those, while modify_orbits_forces applies forces that when orbit-averaged yield the desired behavior. Let's set up a simple simulation of two planets on initially eccentric and inclined orbits:
End of explanation
"""
rebx = reboundx.Extras(sim)
mof = rebx.load_force("modify_orbits_forces")
rebx.add_force(mof)
"""
Explanation: Now let's set up reboundx and add the modify_orbits_forces effect, which implements the migration using forces:
End of explanation
"""
tmax = 1.e3
ps[1].params["tau_a"] = -tmax/2.
ps[2].params["tau_a"] = -tmax
"""
Explanation: Both modify_orbits_forces and modify_orbits_direct exponentially alter the semimajor axis, on an e-folding timescale tau_a. If tau_a < 0, you get exponential damping, and for tau_a > 0, exponential growth, i.e.,
\begin{equation}
a = a_0e^{t/\tau_a}
\end{equation}
In general, each body will have different damping timescales. By default all particles have timescales of infinity, i.e., no effect. The units of time are set by the units of time in your simulation.
Let's set a maximum time for our simulation, and give our two planets different (inward) migration timescales. This can simply be done through:
End of explanation
"""
Nout = 1000
a1,a2 = np.zeros(Nout), np.zeros(Nout)
times = np.linspace(0.,tmax,Nout)
for i,time in enumerate(times):
sim.integrate(time)
a1[i] = ps[1].a
a2[i] = ps[2].a
"""
Explanation: Now we run the simulation like we would normally with REBOUND. Here we store the semimajor axes at 1000 equally spaced intervals:
End of explanation
"""
a1pred = [ainner*np.e**(t/ps[1].params["tau_a"]) for t in times]
a2pred = [aouter*np.e**(t/ps[2].params["tau_a"]) for t in times]
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15,5))
ax = plt.subplot(111)
ax.set_yscale('log')
ax.plot(times,a1, 'r.', label='Integration')
ax.plot(times,a2, 'r.')
ax.plot(times,a1pred, 'k--',label='Prediction')
ax.plot(times,a2pred, 'k--')
ax.set_xlabel("Time", fontsize=24)
ax.set_ylabel("Semimajor axis", fontsize=24)
ax.legend(fontsize=24)
"""
Explanation: Now let's plot it on a linear-log scale to check whether we get the expected exponential behavior. We'll also overplot the expected exponential decays for comparison.
End of explanation
"""
|
bearing/dosenet-analysis | calibration/Untitled.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
#plt.plot([1,2,3,4])
#plt.show()
csv = np.genfromtxt('k40_cal_2019-02-11_D3S.csv', delimiter= ",")
plt.plot(csv.T)
plt.show()
np.max(csv.T)
summed = np.sum(csv.T, axis=1)
plt.plot(summed)
plt.show()
summed[-1]
"""
Explanation: Goals:
- Determine range of channels
- Determine channel at center of peak
End of explanation
"""
#x = findPeak(summed, (2000, 2500))
[x for x in range(len(csv)) if np.max(csv[x][2000:2500]) > 7.5]
samp = summed[2000:2500]
mu = np.mean(samp)
sig = np.std(samp)
print(mu, sig)
#plt.plot(samp)
def func(x, a, m, s, c):
    """Gaussian with amplitude *a*, mean *m*, width *s*, on a flat baseline *c*."""
    exponent = -((x - m) ** 2) / (2 * s ** 2)
    return c + a * np.exp(exponent)
xdata = range(0,len(samp))
trydata = func(samp, np.max(samp), mu, sig, np.max(samp) + 50)
p0 = [250,250,50,10]
popt, pcov = curve_fit(func, xdata, samp, p0)
print(popt)
plt.plot(xdata,samp)
plt.plot(xdata,func(xdata, *popt))
plt.show()
"""
Explanation: <span style="color:red">
The last value appears to be noise.</span>.
<span style="color:red">
I am not sure what is up with the first peak but I know it is not the peak of interest, which is around index 2000 to 2500.</span>.
<span style="color:red">
Right now I am not sure how to determinine that the range of the peak of interest is the values 2000-2500 but I will hard code the values for now</span>.
Below are the channels with values over 7.5 in the range of 2000 to 2500.
End of explanation
"""
fit = func(xdata, *popt)
channel = np.argmax(fit)
print("The channel number is", channel,"and its values is", np.max(fit))
plt.plot(xdata,samp)
plt.plot(xdata,func(xdata, *popt))
plt.plot(channel, np.max(fit), 'ro')
plt.show()
print(int(popt[1] + 2000))
"""
Explanation: Find the channel# of the peak
End of explanation
"""
snipped = summed.copy()
snipped[-1] = 0
snipped[:1000] = np.mean(summed)/5
plt.plot(snipped)
plt.show()
plt.plot(summed)
plt.show()
print(np.std(snipped), np.std(summed))
"""
Explanation: The End.
Everything below here is no longer relevant____
Okay but all of that was cheating and I need to use the summed plot to find the width of the peak.
Then the plan is to take the highest value within that range and find the channel it corresponds to.
I think i will start by disregarding the first peak and only looking at values above index 1000, and getting rid of the final value
End of explanation
"""
# NOTE(review): this function is unfinished — `data[points[p]:` is an
# incomplete slice expression (SyntaxError) and `peak` is never defined.
# The notebook text around it says this code "turned out to be useless";
# it is kept verbatim and documented for the record only.
def peakFinder(data):
    std = np.std(data)
    points = []  # indices where the sample exactly equals int(std)
    for x in range(len(data)):
        # Exact equality with int(std) — presumably intended as a
        # threshold-crossing test; confirm before reusing this code.
        if data[x] == int(std):
            points = points + [x]
    for p in range(len(points) - 1):
        data[points[p]:
    return peak
peakFinder(snipped)
"""
Explanation: Okay so the plan for finding the peak will be to look for points above the standard deviation and to see if 9/10(arbitrary value) of the values in between are greater than the STD.
End of explanation
"""
# This indexHelper helps me avoid array index out of bound errors
def indexHelper(i, top, up):
if i <= 0 or i >= top - 1:
return 0
elif up:
return i+1
else:
return i-1
# True when array[x] is strictly greater than both (boundary-safe) neighbours.
def isLiteralPeak(array, x, top):
    """Return True if array[x] is a strict local maximum.

    Neighbour indices come from indexHelper, so boundary positions compare
    against array[0].
    """
    prev_val = array[indexHelper(x, top, False)]
    next_val = array[indexHelper(x, top, True)]
    return prev_val < array[x] > next_val
def findPeak(array, rng):
    """Return (index, value) pairs for every local peak of *array* whose index
    lies in the half-open range rng = (start, stop).

    A local peak is a point strictly greater than both of its boundary-safe
    neighbours, as judged by isLiteralPeak.
    """
    top = len(array)
    # Fix: the original had a stray backtick after `peaks = []` (SyntaxError)
    # and built the list by side effect inside a throwaway comprehension; a
    # plain list comprehension does the same job correctly.
    return [(x, array[x])
            for x in range(rng[0], rng[1])
            if isLiteralPeak(array, x, top)]
def rangeFinder(row):
    """Return (first, last) indices of the non-zero entries of *row*.

    Returns (0, 0) when the row contains no non-zero entry (and also when the
    only non-zero entry sits at index 0, matching the original behaviour).
    """
    first = 0
    for idx, value in enumerate(row):
        if value != 0:
            first = idx
            break
    last = 0
    for idx in range(len(row) - 1, -1, -1):
        if row[idx] != 0:
            last = idx
            break
    return (first, last)
def channelRange(csv):
    """Return (row_index, (first, last)) for each row of *csv* whose non-zero
    span, as computed by rangeFinder, is not (0, 0).

    NOTE(review): a row whose only non-zero entry is at index 0 also yields
    (0, 0) and is therefore dropped — behaviour preserved from the original;
    confirm this is intended.
    """
    results = []
    for i in range(len(csv)):
        # Compute the span once per row (the original called rangeFinder
        # twice per row: once to filter, once to build the tuple).
        span = rangeFinder(csv[i])
        if span != (0, 0):
            results.append((i, span))
    return results
channelRange(csv.T)
"""
Explanation: Below is random code I wrote that turned out to be useless
End of explanation
"""
|
kaka0525/Process-Bike-Share-data-with-Pandas | bike_plot.ipynb | mit | weather = pd.read_table("daily_weather.tsv")
usage = pd.read_table("usage_2012.tsv")
station = pd.read_table("stations.tsv")
weather.loc[weather['season_code'] == 1, 'season_desc'] = 'winter'
weather.loc[weather['season_code'] == 2, 'season_desc'] = 'spring'
weather.loc[weather['season_code'] == 3, 'season_desc'] = 'summer'
weather.loc[weather['season_code'] == 4, 'season_desc'] = 'fall'
weather['date'] = pd.to_datetime(weather['date'])
month_rental = weather.groupby(weather['date'].dt.month)['total_riders'].sum()
mean = weather.groupby('season_desc')['temp'].mean()
weather['temp'].plot()
weather['month'] = pd.DatetimeIndex(weather.date).month
weather.groupby('month')['temp', 'humidity'].mean().plot(kind='bar')
"""
Explanation: Changed design
1.Plot the daily temperature over the course of the year. (This should probably be a line chart.) Create a bar chart that shows the average temperature and humidity by month.¶
End of explanation
"""
plt.scatter(
weather[weather['season_desc'] == 'winter']['temp'],
weather[weather['season_desc'] == 'winter']['total_riders'],
s=30, color='blue', label='winter')
plt.scatter(
weather[weather['season_desc'] == 'spring']['temp'],
weather[weather['season_desc'] == 'spring']['total_riders'],
s=30, color='magenta', label='spring')
plt.scatter(
weather[weather['season_desc'] == 'summer']['temp'],
weather[weather['season_desc'] == 'summer']['total_riders'],
s=30, color='cyan', label='summer')
plt.scatter(
weather[weather['season_desc'] == 'fall']['temp'],
weather[weather['season_desc'] == 'fall']['total_riders'],
s=30, color='yellow', label='fall')
plt.title("Daily rental volume and temperature")
plt.legend(loc=4)
plt.show()
"""
Explanation: 2.Use a scatterplot to show how the daily rental volume varies with temperature. Use a different series (with different colors) for each season.
End of explanation
"""
plt.scatter(
weather[weather['season_desc'] == 'winter']['windspeed'],
weather[weather['season_desc'] == 'winter']['total_riders'],
s=30, color='blue', label='winter')
plt.scatter(
weather[weather['season_desc'] == 'spring']['windspeed'],
weather[weather['season_desc'] == 'spring']['total_riders'],
s=30, color='magenta', label='spring')
plt.scatter(
weather[weather['season_desc'] == 'summer']['windspeed'],
weather[weather['season_desc'] == 'summer']['total_riders'],
s=30, color='cyan', label='summer')
plt.scatter(
weather[weather['season_desc'] == 'fall']['windspeed'],
weather[weather['season_desc'] == 'fall']['total_riders'],
s=30, color='yellow', label='fall')
plt.title("Daily rental volume and windspeed")
plt.legend(loc=1)
plt.show()
"""
Explanation: 3.Create another scatterplot to show how daily rental volume varies with windspeed. As above, use a different series for each season.
End of explanation
"""
stations = station [['station', 'lat', 'long']]
stations
count = usage['station_start'].value_counts()
average_rental_df = DataFrame({ 'average_rental' : count / 365})
average_rental_df.index = range(185)
average_rental_df
merged_df = pd.concat([stations, average_rental_df], axis=1)
plt.scatter(merged_df['long'], merged_df['lat'], color=c_cycle, alpha=0.5, s=(merged_df['average_rental']*10), label='Location of stations', )
plt.legend(bbox_to_anchor=(1.2, 0.2), loc='lower right', borderaxespad=0)
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.title('Rental volume and geography')
plt.show()
"""
Explanation: 4.How do the rental volumes vary with geography? Compute the average daily rentals for each station and use this as the radius for a scatterplot of each station's latitude and longitude.
End of explanation
"""
|
AmberJBlue/aima-python | agents.ipynb | mit | from agents import *
class BlindDog(Agent):
    """A blind dog agent: it can only act on things at its own location."""
    def eat(self, thing):
        """Eat *thing* at the current location (no success/failure check)."""
        print("Dog: Ate food at {}.".format(self.location))
    def drink(self, thing):
        """Drink *thing* at the current location (no success/failure check)."""
        print("Dog: Drank water at {}.".format( self.location))
dog = BlindDog()
"""
Explanation: AGENT
An agent, as defined in 2.1 is anything that can perceive its <b>environment</b> through sensors, and act upon that environment through actuators based on its <b>agent program</b>. This can be a dog, robot, or even you. As long as you can perceive the environment and act on it, you are an agent. This notebook will explain how to implement a simple agent, create an environment, and create a program that helps the agent act on the environment based on its percepts.
Before moving on, review the <b>Agent</b> and <b>Environment</b> classes in <b>agents.py</b>.
Let's begin by importing all the functions from the agents.py module and creating our first agent - a blind dog.
End of explanation
"""
print(dog.alive)
"""
Explanation: What we have just done is create a dog who can only feel what's in his location (since he's blind), and can eat or drink. Let's see if he's alive...
End of explanation
"""
class Food(Thing):
    """Marker Thing: something the dog can eat."""
    pass
class Water(Thing):
    """Marker Thing: something the dog can drink."""
    pass
class Park(Environment):
    """1-D park environment holding the dog, its food and its water."""
    def percept(self, agent):
        '''prints & return a list of things that are in our agent's location'''
        things = self.list_things_at(agent.location)
        print(things)
        return things
    def execute_action(self, agent, action):
        '''changes the state of the environment based on what the agent does.'''
        if action == "move down":
            agent.movedown()
        elif action == "eat":
            items = self.list_things_at(agent.location, tclass=Food)
            if len(items) != 0:
                if agent.eat(items[0]): #Have the dog pick eat the first item
                    self.delete_thing(items[0]) #Delete it from the Park after.
        elif action == "drink":
            items = self.list_things_at(agent.location, tclass=Water)
            if len(items) != 0:
                if agent.drink(items[0]): #Have the dog drink the first item
                    self.delete_thing(items[0]) #Delete it from the Park after.
    def is_done(self):
        '''By default, we're done when we can't find a live agent,
        but to prevent killing our cute dog, we will or it with when there is no more food or water'''
        # Stop when either the dog is dead or nothing edible/drinkable remains.
        no_edibles = not any(isinstance(thing, Food) or isinstance(thing, Water) for thing in self.things)
        dead_agents = not any(agent.is_alive() for agent in self.agents)
        return dead_agents or no_edibles
"""
Explanation: This is our dog. How cool is he? Well, he's hungry and needs to go search for food. For him to do this, we need to give him a program. But before that, let's create a park for our dog to play in.
ENVIRONMENT
A park is an example of an environment because our dog can perceive and act upon it. The <b>Environment</b> class in agents.py is an abstract class, so we will have to create our own subclass from it before we can use it. The abstract class must contain the following methods:
<li><b>percept(self, agent)</b> - returns what the agent perceives</li>
<li><b>execute_action(self, agent, action)</b> - changes the state of the environment based on what the agent does.</li>
End of explanation
"""
from ipythonblocks import BlockGrid
from agents import *
color = {"Breeze": (225, 225, 225),
"Pit": (0,0,0),
"Gold": (253, 208, 23),
"Glitter": (253, 208, 23),
"Wumpus": (43, 27, 23),
"Stench": (128, 128, 128),
"Explorer": (0, 0, 255),
"Wall": (44, 53, 57)
}
def program(percepts):
    '''Manual agent program: print the percepts and read the action from the user.'''
    print(percepts)
    return input()
w = WumpusEnvironment(program, 7, 7)
grid = BlockGrid(w.width, w.height, fill=(123, 234, 123))
def draw_grid(world):
    """Repaint the module-level BlockGrid `grid` from the Wumpus world state."""
    global grid
    grid[:] = (123, 234, 123)  # reset every cell to the background colour
    for x in range(0, len(world)):
        for y in range(0, len(world[x])):
            if len(world[x][y]):
                # Colour the cell after the top-most (last-listed) thing in it.
                grid[y, x] = color[world[x][y][-1].__class__.__name__]
def step():
    """Draw the current world, display the grid, then advance the environment one step."""
    global grid, w
    draw_grid(w.get_world())
    grid.show()
    w.step()
w.step()
step()
"""
Explanation: Wumpus Environment
End of explanation
"""
class BlindDog(Agent):
    """Blind dog for the 1-D Park: walks down, eating food and drinking water it feels."""
    location = 1  # 1-D position; `+=` in movedown rebinds it per instance
    def movedown(self):
        """Move one step further down the park."""
        self.location += 1
    def eat(self, thing):
        '''returns True upon success or False otherwise'''
        if isinstance(thing, Food):
            print("Dog: Ate food at {}.".format(self.location))
            return True
        return False
    def drink(self, thing):
        ''' returns True upon success or False otherwise'''
        if isinstance(thing, Water):
            print("Dog: Drank water at {}.".format(self.location))
            return True
        return False
def program(percepts):
    """Dog policy: eat any food it feels, drink any water, otherwise keep walking."""
    for percept in percepts:
        if isinstance(percept, Food):
            return 'eat'
        if isinstance(percept, Water):
            return 'drink'
    return 'move down'
park = Park()
dog = BlindDog(program)
dogfood = Food()
water = Water()
park.add_thing(dog, 0)
park.add_thing(dogfood, 5)
park.add_thing(water, 7)
park.run(10)
"""
Explanation: PROGRAM
Now that we have a <b>Park</b> Class, we need to implement a <b>program</b> module for our dog. A program controls how the dog acts upon it's environment. Our program will be very simple, and is shown in the table below.
<table>
<tr>
<td><b>Percept:</b> </td>
<td>Feel Food </td>
<td>Feel Water</td>
<td>Feel Nothing</td>
</tr>
<tr>
<td><b>Action:</b> </td>
<td>eat</td>
<td>drink</td>
<td>move down</td>
</tr>
</table>
End of explanation
"""
class Park(XYEnvironment):
    """2-D park environment (XYEnvironment variant of the 1-D Park above)."""
    def percept(self, agent):
        '''prints & return a list of things that are in our agent's location'''
        things = self.list_things_at(agent.location)
        print(things)
        return things
    def execute_action(self, agent, action):
        '''changes the state of the environment based on what the agent does.'''
        if action == "move down":
            agent.movedown()
        elif action == "eat":
            items = self.list_things_at(agent.location, tclass=Food)
            if len(items) != 0:
                if agent.eat(items[0]): #Have the dog pick eat the first item
                    self.delete_thing(items[0]) #Delete it from the Park after.
        elif action == "drink":
            items = self.list_things_at(agent.location, tclass=Water)
            if len(items) != 0:
                if agent.drink(items[0]): #Have the dog drink the first item
                    self.delete_thing(items[0]) #Delete it from the Park after.
    def is_done(self):
        '''By default, we're done when we can't find a live agent,
        but to prevent killing our cute dog, we will or it with when there is no more food or water'''
        # Stop when either the dog is dead or nothing edible/drinkable remains.
        no_edibles = not any(isinstance(thing, Food) or isinstance(thing, Water) for thing in self.things)
        dead_agents = not any(agent.is_alive() for agent in self.agents)
        return dead_agents or no_edibles
"""
Explanation: That's how easy it is to implement an agent, its program, and environment. But that was a very simple case. What if our environment was 2-dimensional instead of 1? And what if we had multiple agents?
To make our Park 2D, we will need to make it a subclass of <b>XYEnvironment</b> instead of Environment. Also, let's add a person to play fetch with the dog.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.17/_downloads/26e7a9a235c1f1a45a51c99f55fafe0d/plot_background_filtering.ipynb | bsd-3-clause | import numpy as np
from scipy import signal, fftpack
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import mne
sfreq = 1000.  # sample rate (Hz), reasonable for M/EEG data
f_p = 40.  # passband edge of the low-pass filter (Hz)
flim = (1., sfreq / 2.)  # frequency limits for plotting: 1 Hz up to Nyquist
"""
Explanation: Background information on filtering
Here we give some background information on filtering in general,
and how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in Parks & Burrus [1] and
Ifeachor and Jervis [2], and for filtering in an
M/EEG context we recommend reading Widmann et al. 2015 [7]_.
To see how to use the default filters in MNE-Python on actual data, see
the sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py
tutorial.
Problem statement
The practical issues with filtering electrophysiological data are covered
well by Widmann et al. in [7]_, in a follow-up to an article where they
conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_].
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase SNR, but if it is not used carefully,
it can distort data. Here we hope to cover some filtering basics so
users can better understand filtering tradeoffs, and why MNE-Python has
chosen particular defaults.
Filtering basics
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
\begin{align}H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + ... + b_M z^{-M}}
{1 + a_1 z^{-1} + a_2 z^{-2} + ... + a_N z^{-M}} \
&= \frac{\sum_0^Mb_kz^{-k}}{\sum_1^Na_kz^{-k}}\end{align}
In the time domain, the numerator coefficients $b_k$ and denominator
coefficients $a_k$ can be used to obtain our output data
$y(n)$ in terms of our input data $x(n)$ as:
\begin{align}:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + ... + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - ... - a_N y(n - N)\\
&= \sum_0^M b_k x(n-k) - \sum_1^N a_k y(n-k)\end{align}
In other words, the output at time $n$ is determined by a sum over:
1. The numerator coefficients $b_k$, which get multiplied by
the previous input $x(n-k)$ values, and
2. The denominator coefficients $a_k$, which get multiplied by
the previous output $y(n-k)$ values.
Note that these summations in :eq:summations correspond nicely to
(1) a weighted moving average and (2) an autoregression.
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients $b_k$ ($\forall k, a_k=0$), and thus each output
value of $y(n)$ depends only on the $M$ previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in [1]_, FIR and IIR have different tradeoffs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann et al.
2015 [7]_:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required (Ifeachor and Jervis, 2002 [2]_, p. 321),
...FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always tradeoffs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency tradeoff, and it will
show up below.
FIR Filters
First we will focus first on FIR filters, which are the default filters used by
MNE-Python.
Designing FIR filters
Here we'll try designing a low-pass filter, and look at trade-offs in terms
of time- and frequency-domain filter characteristics. Later, in
tut_effect_on_signals, we'll look at how such filters can affect
signals when they are used.
First let's import some useful tools for filtering, and set some default
values for our data that are reasonable for M/EEG data.
End of explanation
"""
nyq = sfreq / 2.  # the Nyquist frequency is half our sample rate
# Brick-wall response: gain 1 up to f_p, gain 0 from f_p to Nyquist.
freq = [0, f_p, f_p, nyq]
gain = [1, 1, 0, 0]
# A figure one third the default height is enough for a single response plot.
third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.]
ax = plt.subplots(1, figsize=third_height)[1]
plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
"""
Explanation: Take for example an ideal low-pass filter, which would give a value of 1 in
the pass-band (up to frequency $f_p$) and a value of 0 in the stop-band
(down to frequency $f_s$) such that $f_p=f_s=40$ Hz here
(shown to a lower limit of -60 dB for simplicity):
End of explanation
"""
# Truncated sinc: the time-domain counterpart of the ideal (brick-wall)
# low-pass, cut to 0.1 sec (odd length so the filter can be centered).
n = int(round(0.1 * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq  # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 sec)', flim=flim)
"""
Explanation: This filter hypothetically achieves zero ripple in the frequency domain,
perfect attenuation, and perfect steepness. However, due to the discontinuity
in the frequency response, the filter would require infinite ringing in the
time domain (i.e., infinite order) to be realized. Another way to think of
this is that a rectangular window in frequency is actually sinc_ function
in time, which requires an infinite number of samples, and thus infinite
time, to represent. So although this filter has ideal frequency suppression,
it has poor time-domain characteristics.
Let's try to naïvely make a brick-wall filter of length 0.1 sec, and look
at the filter itself in the time domain and the frequency domain:
End of explanation
"""
# Same truncated-sinc design, now 1 sec long (better stop-band, longer delay).
n = int(round(1. * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 sec)', flim=flim)
"""
Explanation: This is not so good! Making the filter 10 times longer (1 sec) gets us a
bit better stop-band suppression, but still has a lot of ringing in
the time domain. Note the x-axis is an order of magnitude longer here,
and the filter has a correspondingly much longer group delay (again equal
to half the filter length, or 0.5 seconds):
End of explanation
"""
# Truncated sinc at 10 sec: very tight stop-band, but a 5 sec group delay.
n = int(round(10. * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 sec)', flim=flim)
"""
Explanation: Let's make the stop-band tighter still with a longer filter (10 sec),
with a resulting larger x-axis:
End of explanation
"""
# Relax the ideal response: allow a 10 Hz transition band between f_p and f_s.
trans_bandwidth = 10  # 10 Hz transition band
f_s = f_p + trans_bandwidth  # = 50 Hz
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=third_height)[1]
title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)
plot_ideal_filter(freq, gain, ax, title=title, flim=flim)
"""
Explanation: Now we have very sharp frequency suppression, but our filter rings for the
entire second. So this naïve method is probably not a good way to build
our low-pass filter.
Fortunately, there are multiple established methods to design FIR filters
based on desired response characteristics. These include:
1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
2. Windowed FIR design (:func:`scipy.signal.firwin2`, `MATLAB fir2`_
and :func:`scipy.signal.firwin`)
3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
4. Frequency-domain design (construct filter in Fourier
domain and use an :func:`IFFT <scipy.fftpack.ifft>` to invert it)
<div class="alert alert-info"><h4>Note</h4><p>Remez and least squares designs have advantages when there are
"do not care" regions in our frequency response. However, we want
well controlled responses in all frequency regions.
Frequency-domain construction is good when an arbitrary response
is desired, but generally less clean (due to sampling issues) than
            a windowed approach for more straightforward filter applications.
Since our filters (low-pass, high-pass, band-pass, band-stop)
are fairly simple and we require precise control of all frequency
regions, here we will use and explore primarily windowed FIR
design.</p></div>
If we relax our frequency-domain filter requirements a little bit, we can
use these functions to construct a lowpass filter that instead has a
transition band, or a region between the pass frequency $f_p$
and stop frequency $f_s$, e.g.:
End of explanation
"""
# Windowed FIR design for the relaxed (10 Hz transition) specification.
# NOTE(review): `n` still holds the 10-sec length set in the earlier sinc
# cell, yet the title says "(1.0 sec)" — confirm the intended filter length.
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (1.0 sec)',
            flim=flim)
"""
Explanation: Accepting a shallower roll-off of the filter in the frequency domain makes
our time-domain response potentially much better. We end up with a
smoother slope through the transition region, but a much cleaner time
domain signal. Here again for the 1 sec filter:
End of explanation
"""
# Shorter design: 0.5 sec = 5 cycles of the 10 Hz transition bandwidth.
n = int(round(sfreq * 0.5)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (0.5 sec)',
            flim=flim)
"""
Explanation: Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
use a shorter filter (5 cycles at 10 Hz = 0.5 sec) and still get okay
stop-band attenuation:
End of explanation
"""
# Too short for this spec: 0.2 sec = only 2 cycles of the 10 Hz transition.
n = int(round(sfreq * 0.2)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (0.2 sec)',
            flim=flim)
"""
Explanation: But then if we shorten the filter too much (2 cycles of 10 Hz = 0.2 sec),
our effective stop frequency gets pushed out past 60 Hz:
End of explanation
"""
# A 0.2-sec filter is adequate if we widen the transition band to 25 Hz
# (0.2 sec = 5 cycles @ 25 Hz).
trans_bandwidth = 25
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 50-Hz transition (0.2 sec)',
            flim=flim)
"""
Explanation: If we want a filter that is only 0.1 seconds long, we should probably use
something more like a 25 Hz transition band (0.2 sec = 5 cycles @ 25 Hz):
End of explanation
"""
# Convert the linear-phase FIR above into a (causal) minimum-phase version.
h_min = mne.fixes.minimum_phase(h)
plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim)
"""
Explanation: So far we have only discussed acausal filtering, which means that each
sample at each time point $t$ is filtered using samples that come
after ($t + \Delta t$) and before ($t - \Delta t$) $t$.
In this sense, each sample is influenced by samples that come both before
and after it. This is useful in many cases, especially because it does not
delay the timing of events.
However, sometimes it can be beneficial to use causal filtering,
whereby each sample $t$ is filtered only using time points that came
after it.
Note that the delay is variable (whereas for linear/zero-phase filters it
is constant) but small in the pass-band. Unlike zero-phase filters, which
require time-shifting backward the output of a linear-phase filtering stage
(and thus becoming acausal), minimum-phase filters do not require any
compensation to achieve small delays in the passband. Note that as an
artifact of the minimum phase filter construction step, the filter does
not end up being as steep as the linear/zero-phase version.
We can construct a minimum-phase filter from our existing linear-phase
filter with the minimum_phase function (that will be in SciPy 0.19's
:mod:scipy.signal), and note that the falloff is not as steep:
End of explanation
"""
# Build a 10-sec test signal: a Morlet "blip" at f_p plus noise.
dur = 10.
center = 2.  # the blip is centered at t = 2 sec
morlet_freq = f_p
tlim = [center - 0.2, center + 0.2]  # time window for zoomed plots
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]  # frequency window for plots (Hz)
x = np.zeros(int(sfreq * dur) + 1)
# Imaginary part of a 7-cycle Morlet wavelet, scaled down to blip amplitude.
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()  # keep a clean copy before adding noise
rng = np.random.RandomState(0)  # fixed seed for reproducibility
x += rng.randn(len(x)) / 1000.  # broadband random noise
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.  # 60 Hz line noise
"""
Explanation: Applying FIR filters
Now lets look at some practical effects of these filters by applying
them to some data.
Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
plus noise (random + line). Note that the original, clean signal contains
frequency content in both the pass band and transition bands of our
low-pass filter.
End of explanation
"""
# MNE-Python 0.16 default: firwin design, 25% transition bandwidth,
# half the length of the equivalent firwin2 design.
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band / 2.  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# The explicit scipy design would be equivalent; here we use MNE's helper:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             fir_design='firwin')
x_v16 = np.convolve(h, x)[len(h) // 2:]  # shift by half the length to compensate the delay
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim)
"""
Explanation: Filter it with a shallow cutoff, linear-phase FIR (which allows us to
compensate for the constant filter delay):
End of explanation
"""
# MNE-Python 0.14 default: firwin2 design (twice as long as firwin for the
# same spec, with a shallower transition).
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             fir_design='firwin2')
x_v14 = np.convolve(h, x)[len(h) // 2:]  # compensate the constant group delay
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim)
"""
Explanation: Filter it with a different design mode fir_design="firwin2", and also
compensate for the constant filter delay. This method does not produce
quite as sharp a transition compared to fir_design="firwin", despite
being twice as long:
End of explanation
"""
# MNE-Python 0.13 default: long (10 sec), steep (0.5 Hz transition) FIR,
# applied twice (forward and backward) for zero phase.
transition_band = 0.5  # Hz
f_s = f_p + transition_band
filter_dur = 10.  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             h_trans_bandwidth=transition_band,
                             filter_length='%ss' % filter_dur,
                             fir_design='firwin2')
# Forward pass, reverse, filter again, reverse back, then trim the edges:
# zero-phase filtering with the squared magnitude response.
x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim)
"""
Explanation: This is actually set to become the default type of filter used in MNE-Python
in 0.14 (see tut_filtering_in_python).
Let's also filter with the MNE-Python 0.13 default, which is a
long-duration, steep cutoff FIR that gets applied twice:
End of explanation
"""
# MNE-C default: long, steep FIR designed in the frequency domain.
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]  # compensate the constant group delay
transition_band = 5  # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim)
"""
Explanation: Let's also filter it with the MNE-C default, which is a long-duration
steep-slope FIR filter designed using frequency-domain techniques:
End of explanation
"""
# Minimum-phase (causal) filter: small passband delay, so no delay
# compensation is applied to the convolved output below.
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             phase='minimum', fir_design='firwin')
x_min = np.convolve(h, x)
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim)
"""
Explanation: And now an example of a minimum-phase filter:
End of explanation
"""
axes = plt.subplots(1, 2)[1]


def plot_signal(x, offset):
    """Plot ``x`` (shifted vertically by ``offset``) in time and frequency.

    Draws into the module-level ``axes`` pair (looked up at call time):
    time course on the left, single-sided magnitude spectrum (dB) on the
    right.
    """
    n_samp = len(x)
    times = np.arange(n_samp) / sfreq
    axes[0].plot(times, x + offset)
    axes[0].set(xlabel='Time (sec)', xlim=times[[0, -1]])
    spect = fftpack.fft(x)
    freqs = fftpack.fftfreq(n_samp, 1. / sfreq)
    # Keep only the non-negative frequencies for the magnitude plot.
    keep = freqs >= 0
    spect = spect[keep]
    freqs = freqs[keep]
    # Floor the magnitude at 1e-16 to avoid log10(0).
    magnitude_db = 20 * np.log10(np.maximum(np.abs(spect), 1e-16))
    axes[1].plot(freqs, magnitude_db)
    axes[1].set(xlim=flim)
# Stack each trace at its own vertical offset so all filter outputs can be
# compared on a single pair of axes.
yticks = np.arange(7) / -30.
yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)',
               'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_v16, offset=yticks[2])
plot_signal(x_v14, offset=yticks[3])
plot_signal(x_v13, offset=yticks[4])
plot_signal(x_mne_c, offset=yticks[5])
plot_signal(x_min, offset=yticks[6])
axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-0.200, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.tight_layout()
plt.show()
"""
Explanation: Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
attenuation, but it comes at a cost of potential
ringing (long-lasting ripples) in the time domain. Ringing can occur with
steep filters, especially on signals with frequency content around the
transition band. Our Morlet wavelet signal has power in our transition band,
and the time-domain ringing is thus more pronounced for the steep-slope,
long-duration filter than the shorter, shallower-slope filter:
End of explanation
"""
# Order-2 Butterworth low-pass in second-order-section (SOS) form.
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim)
# Eventually this will just be from scipy signal.sosfiltfilt, but 0.18 is
# not widely adopted yet (as of June 2016), so we use our wrapper...
sosfiltfilt = mne.fixes.get_sosfiltfilt()
x_shallow = sosfiltfilt(sos, x)  # zero-phase (forward-backward) application
"""
Explanation: IIR filters
MNE-Python also offers IIR filtering functionality that is based on the
methods from :mod:scipy.signal. Specifically, we use the general-purpose
functions :func:scipy.signal.iirfilter and :func:scipy.signal.iirdesign,
which provide unified interfaces to IIR filter design.
Designing IIR filters
Let's continue with our design of a 40 Hz low-pass filter, and look at
some trade-offs of different IIR filters.
Often the default IIR filter is a Butterworth filter_, which is designed
to have a maximally flat pass-band. Let's look at a few orders of filter,
i.e., a few different number of coefficients used and therefore steepness
of the filter:
<div class="alert alert-info"><h4>Note</h4><p>Notice that the group delay (which is related to the phase) of
the IIR filters below are not constant. In the FIR case, we can
design so-called linear-phase filters that have a constant group
delay, and thus compensate for the delay (making the filter
acausal) if necessary. This cannot be done with IIR filters, as
they have a non-linear phase (non-constant group delay). As the
filter order increases, the phase distortion near and in the
transition band worsens. However, if acausal (forward-backward)
filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
these phase issues can theoretically be mitigated.</p></div>
End of explanation
"""
# Order-8 Butterworth: steeper roll-off, longer impulse response.
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=8', flim=flim)
x_steep = sosfiltfilt(sos, x)  # zero-phase (forward-backward) application
"""
Explanation: The falloff of this filter is not very steep.
<div class="alert alert-info"><h4>Note</h4><p>Here we have made use of second-order sections (SOS)
by using :func:`scipy.signal.sosfilt` and, under the
hood, :func:`scipy.signal.zpk2sos` when passing the
``output='sos'`` keyword argument to
:func:`scipy.signal.iirfilter`. The filter definitions
given in tut_filtering_basics_ use the polynomial
numerator/denominator (sometimes called "tf") form ``(b, a)``,
which are theoretically equivalent to the SOS form used here.
In practice, however, the SOS form can give much better results
due to issues with numerical precision (see
:func:`scipy.signal.sosfilt` for an example), so SOS should be
used when possible to do IIR filtering.</p></div>
Let's increase the order, and note that now we have better attenuation,
with a longer impulse response:
End of explanation
"""
# Chebyshev type I: trades pass-band ripple for better stop-band attenuation.
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
                       rp=1)  # dB of acceptable pass-band ripple
plot_filter(dict(sos=sos), sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=1 dB', flim=flim)
"""
Explanation: There are other types of IIR filters that we can use. For a complete list,
check out the documentation for :func:scipy.signal.iirdesign. Let's
try a Chebychev (type I) filter, which trades off ripple in the pass-band
to get better attenuation in the stop-band:
End of explanation
"""
# Same design with 6 dB of allowed ripple: slightly steeper, but much
# longer time-domain ringing.
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
                       rp=6)
plot_filter(dict(sos=sos), sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=6 dB', flim=flim)
"""
Explanation: And if we can live with even more ripple, we can get it slightly steeper,
but the impulse response begins to ring substantially longer (note the
different x-axis scale):
End of explanation
"""
# Compare the shallow and steep Butterworth outputs on the Morlet signal,
# each at its own vertical offset (same layout as the FIR comparison).
axes = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
"""
Explanation: Applying IIR filters
Now let's look at how our shallow and steep Butterworth IIR filters
perform on our Morlet signal from before:
End of explanation
"""
# Recreate the Tanner et al. 2015 simulation: a single-cycle cosine
# (5 µV, onset 0.5 s, duration 0.8 s) embedded in zeros, then low- and
# high-pass filtered at several corner frequencies.
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2  # time axis with 0.2 s of pre-stimulus
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)  # raised cosine, peak 5 µV
x[onset:onset + len(sig)] = sig
# NOTE(review): scipy normalizes cutoffs by the Nyquist rate, but these are
# divided by sfreq — the effective corner frequencies are half the nominal
# values in the variable names; confirm against the tutorial text.
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
# Zero-phase (forward-backward) application of each filter.
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
# Fix: the fourth trace is x_hp_p1, the 0.1 Hz HIGH-pass, so label it
# 'HP$_{0.1}$' (was mislabeled 'LP$_{0.1}$').
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
    ax.plot(t, x, color='0.5')
    ax.plot(t, x_f, color='k', linestyle='--')
    ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
           title=title, xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
"""
Explanation: Some pitfalls of filtering
Multiple recent papers have noted potential risks of drawing
errant inferences due to misapplication of filters.
Low-pass problems
Filters in general, especially those that are acausal (zero-phase), can make
activity appear to occur earlier or later than it truly did. As
mentioned in VanRullen 2011 [3], investigations of commonly (at the time)
used low-pass filters created artifacts when they were applied to simulated
data. However, such deleterious effects were minimal in many real-world
examples in Rousselet 2012 [5].
Perhaps more revealing, it was noted in Widmann & Schröger 2012 [6] that
the problematic low-pass filters from VanRullen 2011 [3]:
Used a least-squares design (like :func:scipy.signal.firls) that
included "do-not-care" transition regions, which can lead to
uncontrolled behavior.
Had a filter length that was independent of the transition bandwidth,
which can cause excessive ringing and signal distortion.
High-pass problems
When it comes to high-pass filtering, using corner frequencies above 0.1 Hz
were found in Acunzo et al. 2012 [4]_ to:
"...generate a systematic bias easily leading to misinterpretations of
neural activity.”
In a related paper, Widmann et al. 2015 [7] also came to suggest a 0.1 Hz
highpass. And more evidence followed in Tanner et al. 2015 [8] of such
distortions. Using data from language ERP studies of semantic and syntactic
processing (i.e., N400 and P600), using a high-pass above 0.3 Hz caused
significant effects to be introduced implausibly early when compared to the
unfiltered data. From this, the authors suggested the optimal high-pass
value for language processing to be 0.1 Hz.
We can recreate a problematic simulation from Tanner et al. 2015 [8]_:
"The simulated component is a single-cycle cosine wave with an amplitude
of 5µV, onset of 500 ms poststimulus, and duration of 800 ms. The
simulated component was embedded in 20 s of zero values to avoid
filtering edge effects... Distortions [were] caused by 2 Hz low-pass and
high-pass filters... No visible distortion to the original waveform
[occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
(12 dB/octave roll-off)."
<div class="alert alert-info"><h4>Note</h4><p>This simulated signal contains energy not just within the
pass-band, but also within the transition and stop-bands -- perhaps
most easily understood because the signal has a non-zero DC value,
but also because it is a shifted cosine that has been
*windowed* (here multiplied by a rectangular window), which
makes the cosine and DC frequencies spread to other frequencies
(multiplication in time is convolution in frequency, so multiplying
by a rectangular window in the time domain means convolving a sinc
function with the impulses at DC and the cosine frequency in the
frequency domain).</p></div>
End of explanation
"""
def baseline_plot(x):
    """Plot *x* high-passed at 0.1/0.3/0.5 Hz, without (left column) and
    with (right column) baseline correction using the pre-stimulus mean."""
    all_axes = plt.subplots(3, 2)[1]
    for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
        for ci, ax in enumerate(axes):
            if ci == 0:
                # NOTE(review): scipy normalizes cutoffs by Nyquist; this
                # divides by sfreq, halving the effective corner — verify.
                iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
                                          output='sos')
                x_hp = sosfiltfilt(iir_hp, x, padlen=0)
            else:
                # Right column reuses x_hp from the left column (ci == 0)
                # and additionally subtracts the pre-stimulus (t < 0) mean.
                x_hp -= x_hp[t < 0].mean()
            ax.plot(t, x, color='0.5')
            ax.plot(t, x_hp, color='k', linestyle='--')
            if ri == 0:
                ax.set(title=('No ' if ci == 0 else '') +
                       'Baseline Correction')
            ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
            ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
                          horizontalalignment='right')
    mne.viz.adjust_axes(axes)
    mne.viz.tight_layout()
    # NOTE(review): `title` here is the module-level leftover from an
    # earlier plotting cell — confirm this suptitle text is intended.
    plt.suptitle(title)
    plt.show()
baseline_plot(x)
"""
Explanation: Similarly, in a P300 paradigm reported by Kappenman & Luck 2010 [12]_,
they found that applying a 1 Hz high-pass decreased the probability of
finding a significant difference in the N100 response, likely because
the P300 response was smeared (and inverted) in time by the high-pass
filter such that it tended to cancel out the increased N100. However,
they nonetheless note that some high-passing can still be useful to deal
with drifts in the data.
Even though these papers generally advise a 0.1 Hz or lower frequency for
a high-pass, it is important to keep in mind (as most authors note) that
filtering choices should depend on the frequency content of both the
signal(s) of interest and the noise to be suppressed. For example, in
some of the MNE-Python examples involving ch_sample_data,
high-pass values of around 1 Hz are used when looking at auditory
or visual N100 responses, because we analyze standard (not deviant) trials
and thus expect that contamination by later or slower components will
be limited.
Baseline problems (or solutions?)
In an evolving discussion, Tanner et al. 2015 [8] suggest using baseline
correction to remove slow drifts in data. However, Maess et al. 2016 [9]
suggest that baseline correction, which is a form of high-passing, does
not offer substantial advantages over standard high-pass filtering.
Tanner et al. [10]_ rebutted that baseline correction can correct for
problems with filtering.
To see what they mean, consider again our old simulated signal x from
before:
End of explanation
"""
# Add consistent pre-stimulus activity: a raised cosine spanning the t < 0
# samples, so baseline correction now distorts the post-stimulus signal.
n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
"""
Explanation: In response, Maess et al. 2016 [11]_ note that these simulations do not
address cases of pre-stimulus activity that is shared across conditions, as
applying baseline correction will effectively copy the topology outside the
baseline period. We can see this if we give our signal x with some
consistent pre-stimulus activity, which makes everything look bad.
<div class="alert alert-info"><h4>Note</h4><p>An important thing to keep in mind with these plots is that they
are for a single simulated sensor. In multielectrode recordings
                  the topology (i.e., spatial pattern) of the pre-stimulus activity
will leak into the post-stimulus period. This will likely create a
spatially varying distortion of the time-domain signals, as the
averaged pre-stimulus spatial pattern gets subtracted from the
sensor time courses.</p></div>
Putting some activity in the baseline period:
End of explanation
"""
# Use the same settings as when calling e.g., `raw.filter()`
# to inspect the filter coefficients MNE would apply.
fir_coefs = mne.filter.create_filter(data=None,  # Data is only used for sanity checking, not strictly needed  # noqa
                                     sfreq=1000.,  # sfreq of your data in Hz
                                     l_freq=None,
                                     h_freq=40.,  # assuming a lowpass of 40 Hz
                                     method='fir',
                                     fir_window='hamming',
                                     fir_design='firwin',
                                     verbose=True)
# See the printed log for the transition bandwidth and filter length
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]  # number of FIR taps
"""
Explanation: Both groups seem to acknowledge that the choices of filtering cutoffs, and
perhaps even the application of baseline correction, depend on the
characteristics of the data being investigated, especially when it comes to:
The frequency content of the underlying evoked activity relative
to the filtering parameters.
The validity of the assumption of no consistent evoked activity
in the baseline period.
We thus recommend carefully applying baseline correction and/or high-pass
values based on the characteristics of the data to be analyzed.
Filtering defaults
Defaults in MNE-Python
Most often, filtering in MNE-Python is done at the :class:mne.io.Raw level,
and thus :func:mne.io.Raw.filter is used. This function under the hood
(among other things) calls :func:mne.filter.filter_data to actually
filter the data, which by default applies a zero-phase FIR filter designed
using :func:scipy.signal.firwin. In Widmann et al. 2015 [7]_, they
suggest a specific set of parameters to use for high-pass filtering,
including:
"... providing a transition bandwidth of 25% of the lower passband
edge but, where possible, not lower than 2 Hz and otherwise the
distance from the passband edge to the critical frequency.”
In practice, this means that for each high-pass value l_freq or
low-pass value h_freq below, you would get this corresponding
l_trans_bandwidth or h_trans_bandwidth, respectively,
if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
+------------------+-------------------+-------------------+
| l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
+==================+===================+===================+
| 0.01 | 0.01 | 2.0 |
+------------------+-------------------+-------------------+
| 0.1 | 0.1 | 2.0 |
+------------------+-------------------+-------------------+
| 1.0 | 1.0 | 2.0 |
+------------------+-------------------+-------------------+
| 2.0 | 2.0 | 2.0 |
+------------------+-------------------+-------------------+
| 4.0 | 2.0 | 2.0 |
+------------------+-------------------+-------------------+
| 8.0 | 2.0 | 2.0 |
+------------------+-------------------+-------------------+
| 10.0 | 2.5 | 2.5 |
+------------------+-------------------+-------------------+
| 20.0 | 5.0 | 5.0 |
+------------------+-------------------+-------------------+
| 40.0 | 10.0 | 10.0 |
+------------------+-------------------+-------------------+
| 45.0 | 11.25 | 5.0 |
+------------------+-------------------+-------------------+
| 48.0 | 12.0 | 2.0 |
+------------------+-------------------+-------------------+
MNE-Python has adopted this definition for its high-pass (and low-pass)
transition bandwidth choices when using l_trans_bandwidth='auto' and
h_trans_bandwidth='auto'.
To choose the filter length automatically with filter_length='auto',
the reciprocal of the shortest transition bandwidth is used to ensure
decent attenuation at the stop frequency. Specifically, the reciprocal
(in samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann, Hamming,
or Blackman windows, respectively as selected by the fir_window
argument for fir_design='firwin', and double these for
fir_design='firwin2' mode.
<div class="alert alert-info"><h4>Note</h4><p>For ``fir_design='firwin2'``, the multiplicative factors are
doubled compared to what is given in Ifeachor and Jervis [2]_
(p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
on the frequency response, which we compensate for by
increasing the filter length. This is why
``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.</p></div>
In 0.14, we default to using a Hamming window in filter design, as it
provides up to 53 dB of stop-band attenuation with small pass-band ripple.
<div class="alert alert-info"><h4>Note</h4><p>In band-pass applications, often a low-pass filter can operate
effectively with fewer samples than the high-pass filter, so
it is advisable to apply the high-pass and low-pass separately
when using ``fir_design='firwin2'``. For design mode
``fir_design='firwin'``, there is no need to separate the
operations, as the lowpass and highpass elements are constructed
separately to meet the transition band requirements.</p></div>
For more information on how to use the
MNE-Python filtering functions with real data, consult the preprocessing
tutorial on
sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py.
Defaults in MNE-C
MNE-C by default uses:
5 Hz transition band for low-pass filters.
3-sample transition band for high-pass filters.
Filter length of 8197 samples.
The filter is designed in the frequency domain, creating a linear-phase
filter such that the delay is compensated for as is done with the MNE-Python
phase='zero' filtering option.
Squared-cosine ramps are used in the transition regions. Because these
are used in place of more gradual (e.g., linear) transitions,
a given transition width will result in more temporal ringing but also more
rapid attenuation than the same transition width in windowed FIR designs.
The default filter length will generally have excellent attenuation
but long ringing for the sample rates typically encountered in M-EEG data
(e.g. 500-2000 Hz).
Defaults in other software
A good but possibly outdated comparison of filtering in various software
packages is available in [7]_. Briefly:
EEGLAB
MNE-Python in 0.14 defaults to behavior very similar to that of EEGLAB,
see the EEGLAB filtering FAQ_ for more information.
FieldTrip
By default FieldTrip applies a forward-backward Butterworth IIR filter
of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
high-pass filters). Similar filters can be achieved in MNE-Python when
filtering with :meth:raw.filter(..., method='iir') <mne.io.Raw.filter>
(see also :func:mne.filter.construct_iir_filter for options).
For more information, see e.g. FieldTrip band-pass documentation_.
Reporting Filters
On page 45 in Widmann et al. [7]_, there is a convenient list of important
filter parameters that should be reported with each publication:
filtertype (high-pass, low-pass, band-pass, band-stop, FIR, IIR)
cutoff frequency (including definition)
filter order (or length)
roll-off or transition bandwidth
passband ripple and stopband attenuation
filter delay (zero-phase, linear-phase, non-linear phase) and causality
direction of computation (one-pass forward/reverse, or two-pass forward and
reverse)
In the following, we will address how to deal with these parameters in MNE:
Filter type
Depending on the function or method used, the filter type can be specified.
To name an example. in :func:mne.filter.create_filter, the relevant
arguments would be l_freq, h_freg, method, and if the method is FIR:
fir_window, and fir_design.
Cutoff frequency
The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the
middle of the transition band. That is, if you construct a lowpass FIR filter
with h_freq = 40., the filter function will provide a transition
bandwidth that depends on the h_trans_bandwidth argument. The desired
half-amplitude cutoff of the lowpass FIR filter is then at:
h_freq + transition_bandwidth/2..
Filter length (order) and transition bandwidth (roll-off)
In the tut_filtering_in_python section, we have already talked about
the default filter lengths and transition bandwidths that are used when no
custom values are specified using the respective filter function's arguments.
If you want to find out about the filter length and transition bandwidth that
were used through the 'auto' setting, you can use
:func:mne.filter.create_filter to print out the settings once more:
End of explanation
"""
|
AstroHackWeek/AstroHackWeek2016 | breakouts/gaussian_process/GaussianProcessTasteTest.ipynb | mit | %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
plt.style.use('seaborn-whitegrid')
"""
Explanation: Gaussian Process Taste-test
The scikit-learn package has a nice Gaussian Process example - but what is it doing? In this notebook, we review the mathematics of Gaussian Processes, and then 1) run the scikit-learn example, 2) do the same thing by-hand with numpy/scipy, and finally 3) use the GPy package to compare a few different kernels, on the same test dataset.
Super-brief Introduction to GPs
Let us look at the basics of Gaussian Processes in one dimension. See Rasmussen and Williams for a great, pedagogically smooth introduction to Gaussian Processes that will teach you everything you will need to get started.
We denote $\vec{x}=(x_0, \dots, x_N)$ a vector of 1D input values. A 1D Gaussian process $f$ is such that
$f \sim \mathcal{GP} \ \Longleftrightarrow \ p(f(\vec{x}), f(\vec{x}'))\ \mathrm{is\ Gaussian} \ \forall \vec{x}, \vec{x}'$.
It is fully characterized by a mean function and a kernel,
$$\begin{eqnarray}m(\vec{x}) &=& \mathbb{E}[ f(\vec{x}) ]\
k(\vec{x}, \vec{x}') &=& \mathbb{E}[ (f(\vec{x})-m(\vec{x}))(f(\vec{x}')-m(\vec{x}')) ]\end{eqnarray}$$
Let us consider a noisy dataset $(\vec{x},\vec{y})$ with Gaussian homoskedastic errors $\epsilon$ that are Gaussian distributed with standard deviation $\sigma$. Fitting a Gaussian Process to this data is equivalent to considering a set of basis functions ${\phi_i(x)}$ and finding the optimal weights ${\omega_i}$, which we assume to be Gaussian distributed with some covariance $\Sigma$. It can also be thought of as fitting for an unknown correlated noise term in the data.
$$\begin{eqnarray}
\vec{y} &=& f(\vec{x}) + \vec{\epsilon}\
\vec{\epsilon} &\sim & \mathcal{N}(0,\sigma^2 I)\
f(\vec{x}) &=& \sum_i \omega_i \phi_i(\vec{x}) = \vec{\omega}^T \vec{\phi}(\vec{x}) \
\vec{\omega} &\sim & \mathcal{N}(0,\Sigma)\
\end{eqnarray}$$
In this case, the mean function is assumed to be zero, $m(\vec{x}) = 0$. (This is not actually very constraining, as Rasmussen and Williams explain, and it is not equivalent to assuming that the mean of $f$ is zero.)
There are as many weights as there are data points, which makes the function $f$ very flexible. The weights are constrained by their Gaussian distribution, though. Importantly, the kernel is fully characterized by the choice of basis functions, via
$$\quad k(\vec{x},\vec{x}') = \vec{\phi}(\vec{x})^T \Sigma\ \vec{\phi}(\vec{x}')$$
Picking a set of basis functions is equivalent to picking a kernel, and vice versa. In the correlated noise model interpretation its the kernel function that makes more sense. Typically a kernel will have a handful of hyper-parameters $\vec{\theta}$, that govern the shape of the basis function and correlation structure of the covariance matrix of the predictions. These hyper-parameters can in be inferred from the data, via their log likelihood:
$$ \log p(\vec{y} | \vec{x},\vec{\theta}) = \frac{1}{2} \vec{y}^T K^{-1} \vec{y} - \frac{1}{2} \log |K| - \frac{n}{2} \log 2\pi $$
(Here, the matrix $K$ has elements $K_{ij} = k(x_i,x_j) + \sigma^2 \delta_{ij}$. Note that evaluating the likelihood for $\theta$ involves computing the determinant of the matrix $K$.) Fitting the hyper-parameters is often done by maximizing this likelihood - but that only gets you the "best-fit" hyper-parameters. Posterior samples of the hyper-parameters can be drawn by MCMC in the usual way.
For any given set of hyper-parameters, we can use the Gaussian Process to predict new outputs $\vec{y}^*$ at inputs $\vec{x}^*$. Thanks to the magic of Gaussian distributions and linear algebra, one can show that the posterior distribution for the process evaluated at new inputs $\vec{x}^*$ given a fit to the existing values $(\vec{x},\vec{y})$ is also Gaussian:
$$p( f(\vec{x}^*) | \vec{y}, \vec{x}, \vec{x}^* ) \ = \ \mathcal{N}(\bar{f}, \bar{k})$$
The mean of this PDF for $f(\vec{x}^*)$ is
$$\bar{f} \ =\ k(\vec{x}^*,\vec{x})[k(\vec{x},\vec{x}) + \sigma^2 I]^{-1} \vec{y}$$
and its covariance is
$$\bar{k} = k(\vec{x}^*,\vec{x}^*) - k(\vec{x}^*,\vec{x}) [k(\vec{x},\vec{x}) + \sigma^2 I]^{-1}k(\vec{x}^*,\vec{x})^T $$
Once the kernel is chosen, one can fit the data and make predictions for new data in a single linear algebra operation. Note that multiple matrix inversions and multiplications are involved, so Gaussian Processes can be computationally very expensive - and that the weights are being optimized during the arithmetic calculation of $\bar{f}$.
Inferring the hyper-parameters of the kernel make GPs even more expensive, thanks to the determinant calculation involved.
To generate large numbers of predictions, one just makes a long vector $\vec{x}^*$. We'll see this in the code below, when generating smooth functions to plot through sparse and noisy data. The mean prediction $\bar{f}$ is linear in the input data $y$, which is quite remarkable.
The above is all the math you need to run Gaussian Processes in simple situations. Here is a list of more advanced topics that you should think about when applying Gaussian Processes to real data:
- Generalizing to multiple input dimensions (keeping one output dimension) is trivial, but the case of multiple outputs is not (partly because it is less natural).
- Choosing a physically motivated kernel or a kernel that simplifies the computation, for example by yielding sparse matrices.
- Parametrizing the kernel and/or the mean function and inferring these hyperparameters from the data.
- Using a small fraction of the data to make predictions. This is referred to as Sparse Gaussian Processes. Finding an optimal "summary" subset of the data is key.
- Gaussian Processes natively work with Gaussian noise / likelihood functions. With non-Gaussian cases, some analytical results are no longer valid (e.g. the marginal likelihood) but approximations exist.
- What if the inputs $\vec{x}$ have uncertainties? There are various ways to deal with this, but this is much more intensive than normal Gaussian Processes.
End of explanation
"""
def f(x):
    """Ground-truth target for the regression demos: f(x) = x * sin(x)."""
    return np.sin(x) * x
def make_data(N, rseed=1):
    """Draw N noisy observations of f on [0.1, 9.9].

    Returns
    -------
    X : ndarray, shape (N, 1)
        Input locations as a column vector (the shape scikit-learn expects).
    y : ndarray, shape (N,)
        Noisy targets f(X) + eps with heteroskedastic Gaussian noise.
    dy : ndarray, shape (N,)
        Per-point noise standard deviations, uniform in [0.5, 1.5).

    Note: the exact sequence of RNG calls below determines the data; do not
    reorder them or the notebook's figures change.
    """
    np.random.seed(rseed)  # fixed seed so the notebook is reproducible

    # Create some observations with noise
    X = np.random.uniform(low=0.1, high=9.9, size=N)
    X = np.atleast_2d(X).T

    y = f(X).ravel()
    dy = 0.5 + 1.0 * np.random.random(y.shape)
    noise = np.random.normal(0, dy)  # heteroskedastic: std dev dy[i] at point i
    y += noise
    return X, y, dy

X, y, dy = make_data(20)
"""
Explanation: Make Some Data
End of explanation
"""
# Get the master version of scikit-learn; new GP code isn't in release
# This needs to compile things, so it will take a while...
# Uncomment the following:
# !pip install git+git://github.com/scikit-learn/scikit-learn.git

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF as SquaredExponential
from sklearn.gaussian_process.kernels import ConstantKernel as Amplitude

# Build the kernel: amplitude * squared exponential (RBF), with
# (lower, upper) bounds on each hyper-parameter for the ML optimizer.
kernel = Amplitude(1.0, (1E-3, 1E3)) * SquaredExponential(10, (1e-2, 1e2))

# Instantiate a Gaussian Process model.
# `alpha` adds per-point noise *variance* to the kernel diagonal.
# NOTE(review): (dy / y)**2 treats dy as a fractional error; if dy are the
# absolute std devs returned by make_data, dy**2 would be the usual choice
# -- confirm intent.
gp = GaussianProcessRegressor(kernel=kernel,
                              alpha=(dy / y)**2,  # fractional errors in data
                              n_restarts_optimizer=10)

# Fit to data using Maximum Likelihood Estimation of the hyper-parameters
gp.fit(X, y)
gp.kernel_
# note: gp.kernel is the initial kernel
# gp.kernel_ (with an underscore) is the fitted kernel
gp.kernel_.get_params()
# Mesh the input space for evaluations of the real function and the prediction
# (column-vector shape (1000, 1), as the GP expects).
x_pred = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Make the prediction on the meshed x-axis.
# return_std=True also returns the posterior standard deviation at each point.
y_pred, sigma = gp.predict(x_pred, return_std=True)
def plot_results(X, y, dy, x_pred, y_pred, sigma):
    """Plot the true function, the noisy observations, and the GP fit.

    Parameters
    ----------
    X : ndarray, shape (n, 1)
        Training inputs.
    y, dy : ndarray, shape (n,)
        Noisy training targets and their error-bar sizes.
    x_pred : ndarray, shape (m, 1)
        Dense grid of prediction inputs.
    y_pred, sigma : ndarray, shape (m,)
        Posterior mean and standard deviation at ``x_pred``.
    """
    fig = plt.figure(figsize=(8, 6))

    # Noiseless ground truth and the noisy observations generated from it.
    plt.plot(x_pred, f(x_pred), 'k:', label=u'$f(x) = x\,\sin(x)$')
    plt.errorbar(X.ravel(), y, dy, fmt='k.', markersize=10, label=u'Observations',
                 ecolor='gray')

    # Posterior mean and 95% band (1.9600 = upper 97.5% normal quantile).
    plt.plot(x_pred, y_pred, 'b-', label=u'Prediction')
    plt.fill(np.concatenate([x_pred, x_pred[::-1]]),
             np.concatenate([y_pred - 1.9600 * sigma,
                            (y_pred + 1.9600 * sigma)[::-1]]),
             alpha=.3, fc='b', ec='None', label='95% confidence interval')
    plt.xlabel('$x$')
    plt.ylabel('$f(x)$')
    plt.ylim(-10, 20)
    plt.legend(loc='upper left');

plot_results(X, y, dy, x_pred, y_pred, sigma)
"""
Explanation: Gaussian Process Regression with Scikit-Learn
Example adapted from Scikit-learn's Examples
End of explanation
"""
import scipy.linalg

# Gram matrix of the training inputs under the *fitted* kernel, plus the same
# per-point noise variances on the diagonal as used in the sklearn fit above.
KXX = gp.kernel_(X)
A = KXX + np.diag((dy/y)**2.)

# Cholesky factor A = L L'; cho_solve then applies A^{-1} cheaply and stably.
L = scipy.linalg.cholesky(A, lower=True)

# Cross-covariance k(x*, x) and test covariance k(x*, x*).
KXXp = gp.kernel_(x_pred, X)
KXpXp = gp.kernel_(x_pred)

# Posterior mean: k(x*, x) [K + Sigma_noise]^{-1} y.
# NOTE(review): adding np.mean(y) back would only be correct if y had been
# centered before solving (as with sklearn's normalize_y=True) -- confirm
# against the sklearn result above.
alpha = scipy.linalg.cho_solve((L, True), y)
y_pred = np.dot(KXXp, alpha) + np.mean(y, axis=0)

# Posterior covariance: k(x*, x*) - k(x*, x) [K + Sigma_noise]^{-1} k(x, x*).
v = scipy.linalg.cho_solve((L, True), KXXp.T)
y_pred_fullcov = KXpXp - KXXp.dot(v)
sigma = np.sqrt(np.diag(y_pred_fullcov))

plot_results(X, y, dy, x_pred, y_pred, sigma)
"""
Explanation: Gaussian Processes by-hand
Let us run the same example but solving the Gaussian Process equations by hand.
Let's use the kernel constructed with scikit-learn (because its parameters are optimized)
And let's compute the Gaussian process manually using Scipy linalg
End of explanation
"""
import GPy

# One-dimensional kernels to compare.  Note that GPy's RBF and ExpQuad are two
# names for the same squared-exponential kernel, so those two panels should
# look (near) identical.
kernels = [GPy.kern.RBF(input_dim=1),
           GPy.kern.Brownian(input_dim=1),
           GPy.kern.Matern32(input_dim=1),
           GPy.kern.Matern52(input_dim=1),
           GPy.kern.ExpQuad(input_dim=1),
           GPy.kern.Cosine(input_dim=1)]
# Panel titles, same order as `kernels` (fixed the "Mattern32" typo).
names = ['Gaussian', 'Brownian', 'Matern32', 'Matern52', 'ExpQuad', 'Cosine']

fig, axs = plt.subplots(3, 2, figsize=(12, 12), sharex=True, sharey=True)
axs = axs.ravel()
for i, k in enumerate(kernels):
    # Fit a GP regression, optimizing the kernel hyper-parameters by ML.
    m = GPy.models.GPRegression(X, y[:, None], kernel=k)
    m.optimize()
    # Posterior over f, plus four posterior samples.
    m.plot_f(ax=axs[i], plot_data=True, samples=4, legend=False, plot_limits=[0, 10])
    # Axes.errorbar expects 1-D x; X is a (N, 1) column vector, so ravel it.
    axs[i].errorbar(X.ravel(), y, yerr=dy, fmt="o", c='k')
    axs[i].set_title(names[i])
    axs[i].plot(x_pred, f(x_pred), 'k:', label=u'$f(x) = x\,\sin(x)$')
fig.tight_layout()
"""
Explanation: Quick kernel comparison with GPy
Let's now use the GPy package and compare a couple of kernels applied to our example.
We'll optimize the parameters in each case. We not only plot the mean and std dev of the process
but also a few samples. As you can see, they look very different, and the choice of kernel is critical!
End of explanation
"""
|
QuantEcon/QuantEcon.notebooks | permanent_income.ipynb | bsd-3-clause | import quantecon as qe
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
%matplotlib inline
np.set_printoptions(suppress=True, precision=4)
"""
Explanation: Permanent Income Model
Chase Coleman and Thomas Sargent
This notebook maps instances of the linear-quadratic-Gaussian permanent income model
with $\beta R = 1$ into a linear state space system, applies two different approaches to solving the model and compares outcomes from those two approaches. After confirming that answers produced by the two methods agree, it applies the quantecon LinearStateSpace class to illustrate various features of the model.
Besides being a workhorse model for analyzing consumption data, the model is good for illustrating the concepts of
* stationarity
* ergodicity
* ensemble moments and cross section observations
* cointegration
* linear-quadratic dynamic programming problems
Background readings on the linear-quadratic-Gaussian permanent income model are Robert Hall's 1978 JPE paper ``Stochastic Implications of the Life Cycle-Permanent Income Hypothesis: Theory and Evidence'' and chapter 2 of Recursive Macroeconomic Theory
Let's get started
End of explanation
"""
# Possible parameters
# alpha, beta, rho1, rho2, sigma
params = [[10.0, 0.95, 1.2, -0.3, 1.0],
          [10.0, 0.95, 0.9, 0.0, 1.0],
          [10.0, 0.95, 0.0, -0.0, 10.0]]

# Set parameters (second row: AR(1) income with persistence 0.9)
alpha, beta, rho1, rho2, sigma = params[1]
# Note: LinearStateSpace object runs into iteration limit in computing stationary variance when we set
# sigma = .5 -- replace with doublej2 to fix this. Do some more testing
R = 1/beta  # gross interest rate satisfies beta * R = 1

# State z_t = [1, y_t, y_{t-1}]' so that
#   y_{t+1} = alpha + rho1 * y_t + rho2 * y_{t-1} + sigma * w_{t+1}
A = np.array([[1., 0., 0.],
              [alpha, rho1, rho2],
              [0., 1., 0.]])
C = np.array([[0.], [sigma], [0.]])
G = np.array([[0., 1., 0.]])  # selection vector U_y: income is the 2nd state

# for later use, form LinearStateSpace system and pull off steady state moments
mu_z0 = np.array([[1.0], [0.0], [0.0]])
sig_z0 = np.zeros((3, 3))
Lz = qe.LinearStateSpace(A, C, G, mu_0=mu_z0, Sigma_0=sig_z0)
muz, muy, Sigz, Sigy = Lz.stationary_distributions()

# mean vector of state for the savings problem (append debt b = 0)
mxo = np.vstack([muz, 0.0])

# create stationary covariance matrix of x -- start everyone off at b=0
a1 = np.zeros((3, 1))
aa = np.hstack([Sigz, a1])
bb = np.zeros((1, 4))
sxo = np.vstack([aa, bb])

# These choices will initialize the state vector of an individual at zero debt
# and the ergodic distribution of the endowment process. Use these to create
# the Bewley economy.
mxbewley = mxo
sxbewley = sxo
"""
Explanation: Plan of the notebook
We study a version of the linear-quadratic-Gaussian model described in section 2.12 of chapter 2 of Ljungqvist and Sargent's Recursive Macroeconomic Theory
We solve the model in two ways:
as an LQ dynamic programming problem, and
as a system of expectational difference equations with boundary conditions that advise us to solve stable roots backwards and unstable roots forwards (see appendix A of chapter 2 of Ljungqvist and Sargent).
We confirm numerically that these two methods give rise to approximately the same solution. The adverb approximately is appropriate because we use a technical trick to map the problem into a well behaved LQ dynamic programming problem.
The model
The LQ permanent income model is an example of a
``savings problem.''
A consumer has preferences over consumption streams
that are ordered by
the utility functional
$$ E_0 \sum_{t=0}^\infty \beta^t u(c_t), \quad(1) $$
where $E_t$ is the mathematical expectation conditioned
on the consumer's time $t$ information, $c_t$ is time $t$ consumption,
$u(c)$ is a strictly concave one-period utility function, and
$\beta \in (0,1)$ is a discount factor. The LQ model gets its name partly from assuming that the
utility function $u$ is quadratic:
$$ u(c) = -.5(c - \gamma)^2 $$
where $\gamma>0$ is a bliss level of consumption.
The consumer maximizes
the utility functional (1) by choosing a consumption, borrowing plan
${c_t, b_{t+1}}_{t=0}^\infty$ subject to the sequence of budget constraints
$$ c_t + b_t = R^{-1} b_{t+1} + y_t, t \geq 0, \quad(2) $$
where $y_t$ is an exogenous
stationary endowment process, $R$ is a constant gross
risk-free interest rate, $b_t$ is one-period risk-free debt maturing at
$t$, and $b_0$ is a given initial condition. We shall assume
that $R^{-1} = \beta$. Equation (2) is linear. We use another set of linear equations
to model the endowment process. In particular, we assume that the endowment
process has the state-space representation
$$ \eqalign{ z_{t+1} & = A_{22} z_t + C_2 w_{t+1} \cr
y_t & = U_y z_t \cr} \quad (3) $$
where $w_{t+1}$ is an i.i.d. process with mean zero and
identity contemporaneous covariance matrix, $A_{22}$ is a stable matrix,
its eigenvalues being strictly below unity in modulus, and
$U_y$ is a selection vector that identifies $y$ with a particular
linear combination of the $z_t$.
We impose the following condition on the
consumption, borrowing plan:
$$ E_0 \sum_{t=0}^\infty \beta^t b_t^2 < +\infty. \quad (4) $$
This condition suffices to rule out Ponzi schemes. (We impose this condition to
rule out a borrow-more-and-more plan that would allow the household to
enjoy bliss consumption forever.)
The state vector confronting the household at $t$ is
$$ x_t = \left[\matrix{z_t \cr b_t\cr}\right]',$$
where $b_t$ is its one-period debt falling
due at the beginning of period $t$
and $z_t$ contains all variables useful for
forecasting its future endowment.
We shall solve the problem two ways.
First, as a linear-quadratic control dynamic programming problem that we can solve using the LQ class.
Second, as a set of expectational difference equations that we can solve with homemade programs.
Solution as an LQ problem
We can map the problem into a linear-quadratic dynamic programming problem, also known
as an optimal linear regulator problem.
The stochastic discounted linear optimal regulator problem is to
choose a decision rule for $u_t$ to
maximize
$$ - E_0\sum_{t=0}^\infty \beta^t {x'_t Rx_t+u'_tQu_t},\quad 0<\beta<1,$$
subject to $x_0$ given, and the law of motion
$$x_{t+1} = A x_t+ Bu_t+ C w_{t+1},\qquad t\geq 0, $$
where $w_{t+1}$ is an $(n\times 1)$ vector of random variables that is
independently and identically distributed according to the normal
distribution with mean vector zero and covariance matrix
$Ew_t w'_t= I .$
The value function for this problem is
$v(x)= - x'Px-d,$
where $P$ is the unique positive semidefinite solution of the discounted
algebraic matrix Riccati equation corresponding to the limit of iterations on matrix Riccati difference
equation
$$P_{j+1} =R+\beta A'P_j A-\beta^2 A'P_jB(Q+\beta B'P_jB)^{-1} B'P_jA.$$
from $P_0=0$. The optimal policy is $u_t=-Fx_t$, where $F=\beta (Q+\beta
B'PB)^{-1} B'PA$.
The scalar $d$ is given by
$ d=\beta(1-\beta)^{-1} {\rm trace} ( P C C') . $
Under an optimal decision rule $F$, the state vector $x_t$ evolves according to
$$ x_{t+1} = (A-BF) x_t + C w_{t+1} $$
$$ \left[\matrix{z_{t+1} \cr b_{t+1} \cr}\right] = \left[\matrix{ A_{22} & 0 \cr R(U_\gamma - U_y) & R } \right]\left[\matrix{z_{t} \cr b_{t} \cr}\right] +
\left[\matrix{0 \cr R}\right] (c_t - \gamma) + \left[\matrix{ C_t \cr 0 } \right] w_{t+1} $$
or
$$ x_{t+1} = A x_t + B u_t + C w_{t+1} $$
We form the quadratic form $x_t' \bar R x_t + u_t'Q u_t $ with
$Q =1$ and $\bar R$ a $ 4 \times 4$ matrix with all elements zero except for a very small entry
$\alpha >0$ in the $(4,4)$ position. (We put the $\bar \cdot$ over the $R$ to avoid ``recycling''
the $R$ notation!)
We begin by creating an instance of the state-space system (2) that governs the income ${y_t}$ process. We assume
it is a second order univariate autoregressive process:
$$ y_{t+1} = \alpha + \rho_1 y_t + \rho_2 y_{t-1} + \sigma w_{t+1} $$
End of explanation
"""
#
# Here we create the matrices for our system.
# State x_t = [1, y_t, y_{t-1}, b_t]'; control u_t = c_t - gamma.
#
A12 = np.zeros((3,1))
ALQ_l = np.hstack([A, A12])
ALQ_r = np.array([[0, -R, 0, R]])   # b_{t+1} = R*(b_t - y_t) + R*u_t row
ALQ = np.vstack([ALQ_l, ALQ_r])

# Tiny 1e-9 penalty on b_t^2 induces the Bellman equation to respect the
# no-Ponzi condition (4); it introduces a correspondingly tiny approximation
# error in the decision rule, checked numerically below.
RLQ = np.array([[0., 0., 0., 0.],
                [0., 0., 0., 0.],
                [0., 0., 0., 0.],
                [0., 0., 0., 1e-9]])

QLQ = np.array([1.0])   # quadratic penalty on u_t = c_t - gamma
BLQ = np.array([0., 0., 0., R]).reshape(4,1)
CLQ = np.array([0., sigma, 0., 0.]).reshape(4,1)
betaLQ = beta

print("We can inspect the matrices that describe our system below")
print("A = \n", ALQ)
print("B = \n", BLQ)
print("R = \n", RLQ)
print("Q = \n", QLQ)
"""
Explanation: It turns out that the bliss level of consumption $\gamma$ in the utility function $-.5 (c_t -\gamma)^2$
has no effect on the optimal decision rule.
(We shall see why below when we inspect the Euler equation for consumption.)
Now create the objects for the optimal linear regulator.
Here we will use a trick to induce the Bellman equation to respect restriction (4) on the debt sequence
${b_t}$. To accomplish that, we'll put a very small penalty on $b_t^2$ in the criterion function.
That will induce a (hopefully) small approximation error in the decision rule. We'll check whether it really is small numerically soon.
End of explanation
"""
# Assemble the discounted optimal linear regulator problem from the matrices
# built above (discount factor betaLQ = beta).
LQPI = qe.LQ(QLQ, RLQ, ALQ, BLQ, C=CLQ, beta=betaLQ)
"""
Explanation: Now create the appropriate instance of an LQ model
End of explanation
"""
# Solve the discounted algebraic Riccati equation: P is the value-function
# matrix, F the decision rule (u_t = -F x_t), d the constant term.
P, F, d = LQPI.stationary_values()  # Compute optimal value function and decision rule
# Closed-loop law of motion x_{t+1} = (A - B F) x_t + C w_{t+1}
ABF = ALQ - np.dot(BLQ,F)  # Form closed loop system
"""
Explanation: Now create the optimal policies using the analytic formulas.
We'll save the answers and will compare them with answers we get by employing an alternative solution method.
End of explanation
"""
# Use the above formulas (equation system (11)) to create the optimal policies
# for $b_{t+1}$ and $c_t$: consumption is the fraction (1 - beta) of total
# (nonfinancial plus financial) wealth.
b_pol = np.dot(G, la.inv(np.eye(3, 3) - beta*A)).dot(A - np.eye(3, 3))
c_pol = (1 - beta)*np.dot(G, la.inv(np.eye(3, 3) - beta*A))

# Create the A matrix for a LinearStateSpace instance; state is (z_t, b_t)
A_LSS1 = np.vstack([A, b_pol])
A_LSS2 = np.eye(4, 1, -3)  # last column: b_{t+1} also loads one-for-one on b_t
A_LSS = np.hstack([A_LSS1, A_LSS2])

# Create the C matrix for LSS methods (the debt equation carries no shock)
C_LSS = np.vstack([C, np.zeros(1)])

# Create the G matrix for LSS methods; observables are (y_t, c_t)
G_LSS1 = np.vstack([G, c_pol])
G_LSS2 = np.vstack([np.zeros(1), -(1 - beta)])  # c_t loads on b_t with -(1-beta)
G_LSS = np.hstack([G_LSS1, G_LSS2])

# use the following values to start everyone off at b=0, initial incomes zero
# Initial Conditions
mu_0 = np.array([1., 0., 0., 0.])
sigma_0 = np.zeros((4, 4))
"""
Explanation: Solution via a system of expectational difference equations
Now we will solve the household's optimum problem by first deducing the Euler equations that are the first-order conditions with respect to consumption and savings, then using the budget constraints and the boundary condition (4) to complete a system of expectational linear difference equations that we'll solve for the optimal consumption, debt plan.
First-order conditions for the problem are
$$ E_t u'(c_{t+1}) = u'(c_t) , \ \ \forall t \geq 0. \quad (5) $$
In our linear-quadratic model, we assume
the quadratic utility function
$u(c_t) = -.5 (c_t - \gamma)^2$,
where $\gamma$ is a bliss level of consumption. Then the consumption Euler equation becomes
$$ E_t c_{t+1} = c_t . \quad (6) $$
Along with the quadratic utility specification, we allow consumption
$c_t$ to be negative.
To deduce the optimal decision rule, we want to solve the system
of difference equations formed by (2) and (6)
subject to the boundary condition (4). To accomplish this,
solve (2) forward and impose $\lim_{T\rightarrow +\infty} \beta^T b_{T+1} =0$ to get
$$ b_t = \sum_{j=0}^\infty \beta^j (y_{t+j} - c_{t+j}) . \quad (7) $$
Imposing $\lim_{T\rightarrow +\infty} \beta^T b_{T+1} =0$ suffices to impose (4) on the debt
path.
Take conditional expectations on both sides of (7) and use (6)
and the law of iterated expectations to deduce
$$ b_t = \sum_{j=0}^\infty \beta^j E_t y_{t+j} - {1 \over 1-\beta} c_t
\quad (8) $$
or
$$ c_t = (1-\beta)
\left[ \sum_{j=0}^\infty \beta^j E_t y_{t+j} - b_t\right].
\quad (9) $$
If we define the net rate of interest $r$ by $\beta ={1 \over 1+r}$, we can
also express this
equation as
$$ c_t = {r \over 1+r}
\left[ \sum_{j=0}^\infty \beta^j E_t y_{t+j} - b_t\right]. \quad (10) $$
Equation (9) or (10) asserts that consumption equals what Irving Fisher defined as
economic income, namely, a constant
marginal propensity to consume or interest factor ${r \over 1+r}$ times
the sum of nonfinancial wealth $
\sum_{j=0}^\infty \beta^j E_t y_{t+j}$ and financial
wealth $-b_t$. Notice that (9) or (10) represents
$c_t$ as a function of the state $[b_t, z_t]$
confronting the household, where from $z_t$ contains all
information useful for forecasting the endowment process.
Pulling together our preceding results, we can regard $z_t, b_t$ as
the time $t$ state, where $z_t$ is an exogenous component of the state
and $b_t$ is an endogenous component of the state vector. The system
can be represented as
$$ \eqalign{ z_{t+1} & = A_{22} z_t + C_2 w_{t+1} \cr
b_{t+1} & = b_t + U_y [ (I -\beta A_{22})^{-1} (A_{22} - I) ] z_t \cr
y_t & = U_y z_t \cr
c_t & = (1-\beta) [ U_y(I-\beta A_{22})^{-1} z_t - b_t ]. \cr } \quad (11) $$
Now we'll apply the formulas in equation system (11).
Later we shall use them to get objects needed to form the system (11) as an instance of a LinearStateSpace class that we'll use to exhibit features of the LQ permanent income model.
End of explanation
"""
# The two closed-loop transition matrices should agree; display their
# elementwise difference (approximately zero, up to the tiny 1e-9 penalty
# placed on b_t^2 in the LQ formulation).
ABF - A_LSS
"""
Explanation: A_LSS calculated as we have here should equal ABF calculated above using the LQ model.
Here comes the check. The difference between ABF and A_LSS should be zero
End of explanation
"""
# Compare the consumption rule from the difference-equation approach (c_pol)
# with the LQ regulator's rule (-F); the coefficients should (nearly) match.
print(c_pol, "\n", -F)
"""
Explanation: Now compare pertinent elements of c_pol and -F
End of explanation
"""
# State-space system (11) with everyone starting at zero income and zero debt.
LSS = qe.LinearStateSpace(A_LSS, C_LSS, G_LSS, mu_0=mu_0, Sigma_0=sigma_0)
"""
Explanation: We have verified that the two methods give the same solution.
Now let's create an instance of a LinearStateSpace model.
To do this, we'll use the outcomes from out second method.
Two examples
Now we'll generate panels of consumers. We'll study two examples that are differentiated only by the initial states with which we endow consumers. All other parameter values are kept the same in the two examples.
In the first example, all consumers begin with zero nonfinancial income and zero debt. The consumers are thus ex ante identical.
In the second example, consumers are ex ante heterogeneous. While all of them begin with zero debt, we draw their initial income levels from the invariant distribution of financial income.
In the first example, consumers' nonfinancial income paths will display prounounced transients early in the sample that will affect outcomes in striking ways. Those transient effects will not be present in the second example.
Now we'll use methods that the LinearStateSpace class contains to simulate the model with our first set of intitial conditions.
25 paths of the exogenous non-financial income process and the associated consumption and debt paths. In the first set of graphs, the darker lines depict one particular sample path, while the lighter lines indicate the other 24 paths.
A second graph that plots a collection of simulations against the population distribution that we extract from the LinearStateSpace instance LSS
End of explanation
"""
def income_consumption_debt_series(A, C, G, m0, s0, T=150, npaths=25):
    """
    Simulate the LQ permanent income economy and collect population moments.

    This function takes initial conditions (m0, s0) and uses the Linear State Space
    class from QuantEcon to simulate an economy `npaths` times for `T` periods.
    It then uses that information to generate some graphs related to the discussion
    below.

    Parameters
    ----------
    A, C, G : state-space matrices of the model
    m0, s0 : initial mean and covariance of the state
    T : int, number of periods to simulate (default 150)
    npaths : int, number of independent sample paths (default 25)

    Returns
    -------
    bsim, csim, ysim : (npaths, T) arrays of simulated debt, consumption,
        and nonfinancial income
    cons_mean, cons_var, debt_mean, debt_var : length-T arrays of the
        population mean/variance of consumption and debt
    """
    LSS = qe.LinearStateSpace(A, C, G, mu_0=m0, Sigma_0=s0)
    # Simulation/Moment Parameters
    moment_generator = LSS.moment_sequence()
    # Simulate various paths
    bsim = np.empty((npaths, T))
    csim = np.empty((npaths, T))
    ysim = np.empty((npaths, T))
    for i in range(npaths):
        sims = LSS.simulate(T)
        bsim[i, :] = sims[0][-1, :]  # debt is the last state variable
        csim[i, :] = sims[1][1, :]   # consumption is the second observable
        ysim[i, :] = sims[1][0, :]   # nonfinancial income is the first observable
    # Get the moments
    cons_mean = np.empty(T)
    cons_var = np.empty(T)
    debt_mean = np.empty(T)
    debt_var = np.empty(T)
    for t in range(T):
        mu_x, mu_y, sig_x, sig_y = next(moment_generator)
        cons_mean[t], cons_var[t] = mu_y[1], sig_y[1, 1]
        debt_mean[t], debt_var[t] = mu_x[3], sig_x[3, 3]  # debt occupies state index 3
    return bsim, csim, ysim, cons_mean, cons_var, debt_mean, debt_var
def consumption_income_debt_figure(bsim, csim, ysim):
    """
    Plot simulated nonfinancial income, consumption, and debt paths.

    The top panel overlays consumption and income; the bottom panel shows
    debt. One representative path (row 0) is drawn solid, and every path
    is drawn faintly behind it.

    Parameters
    ----------
    bsim, csim, ysim : (npaths, T) arrays of debt, consumption, and
        nonfinancial income simulations.

    Returns
    -------
    fig : matplotlib Figure containing both panels.
    """
    # (Removed unused locals T and xvals -- the plots index the arrays directly.)
    fig, ax = plt.subplots(2, 1, figsize=(10, 8))
    # Plot consumption and income: highlight path 0, show all paths faintly
    ax[0].plot(csim[0, :], label="c", color="b")
    ax[0].plot(ysim[0, :], label="y", color="g")
    ax[0].plot(csim.T, alpha=.1, color="b")
    ax[0].plot(ysim.T, alpha=.1, color="g")
    ax[0].legend(loc=4)
    ax[0].set_xlabel("t")
    ax[0].set_ylabel("y and c")
    # Plot debt
    ax[1].plot(bsim[0, :], label="b", color="r")
    ax[1].plot(bsim.T, alpha=.1, color="r")
    ax[1].legend(loc=4)
    ax[1].set_xlabel("t")
    ax[1].set_ylabel("debt")
    fig.suptitle("Nonfinancial Income, Consumption, and Debt")
    return fig
def consumption_debt_fanchart(csim, cons_mean, cons_var,
                              bsim, debt_mean, debt_var):
    """
    Plot fan charts of the cross-sectional distributions of consumption
    and debt over time.

    Each panel shows the population mean (black), the individual
    simulated paths (faint black), and shaded ~90% (red) and ~95% (blue)
    bands built from the population variances under normality.

    Parameters
    ----------
    csim, bsim : (npaths, T) arrays of simulated consumption and debt
    cons_mean, cons_var, debt_mean, debt_var : length-T population moments

    Returns
    -------
    fig2 : matplotlib Figure containing both fan charts.
    """
    # (Removed unused local dmean and dead commented-out set_ylim call.)
    T = bsim.shape[1]
    # Percentile bands for the consumption cross-section distribution
    cmean = np.mean(cons_mean)
    c90 = 1.65 * np.sqrt(cons_var)  # ~90% two-sided normal band
    c95 = 1.96 * np.sqrt(cons_var)  # ~95% two-sided normal band
    c_perc_95p, c_perc_95m = cons_mean + c95, cons_mean - c95
    c_perc_90p, c_perc_90m = cons_mean + c90, cons_mean - c90
    # Percentile bands for the debt cross-section distribution
    d90 = 1.65 * np.sqrt(debt_var)
    d95 = 1.96 * np.sqrt(debt_var)
    d_perc_95p, d_perc_95m = debt_mean + d95, debt_mean - d95
    d_perc_90p, d_perc_90m = debt_mean + d90, debt_mean - d90
    # Create second figure
    fig2, ax2 = plt.subplots(2, 1, figsize=(10, 8))
    xvals = np.arange(T)
    # Consumption fan
    ax2[0].plot(xvals, cons_mean, color="k")
    ax2[0].plot(csim.T, color="k", alpha=.25)
    ax2[0].fill_between(xvals, c_perc_95m, c_perc_95p, alpha=.25, color="b")
    ax2[0].fill_between(xvals, c_perc_90m, c_perc_90p, alpha=.25, color="r")
    ax2[0].set_ylim((cmean - 15, cmean + 15))
    ax2[0].set_ylabel("consumption")
    # Debt fan
    ax2[1].plot(xvals, debt_mean, color="k")
    ax2[1].plot(bsim.T, color="k", alpha=.25)
    ax2[1].fill_between(xvals, d_perc_95m, d_perc_95p, alpha=.25, color="b")
    ax2[1].fill_between(xvals, d_perc_90m, d_perc_90p, alpha=.25, color="r")
    ax2[1].set_ylabel("debt")
    fig2.suptitle("Consumption/Debt over time")
    ax2[1].set_xlabel("t")
    return fig2
# Creates pictures with initial conditions of 0.0 for y and b
out = income_consumption_debt_series(A_LSS, C_LSS, G_LSS, mu_0, sigma_0)
bsim0, csim0, ysim0 = out[:3]
cons_mean0, cons_var0, debt_mean0, debt_var0 = out[3:]
fig_0 = consumption_income_debt_figure(bsim0, csim0, ysim0)
fig_02 = consumption_debt_fanchart(csim0, cons_mean0, cons_var0,
bsim0, debt_mean0, debt_var0)
fig_0.show()
fig_02.show()
"""
Explanation: Population and sample panels
In the code below, we use the LinearStateSpace class to
compute and plot population quantiles of the distributions of consumption and debt for a population of consumers
simulate a group of 25 consumers and plot sample paths on the same graph as the population distribution
End of explanation
"""
def cointegration_figure(bsim, csim):
    """
    Plots the cointegrating residual (1 - beta) * b_t + c_t over time:
    path 0 is drawn solid, all simulated paths faintly.

    NOTE(review): uses the module-level discount factor ``beta`` defined
    earlier in the notebook.
    """
    # Create figure
    fig, ax = plt.subplots(figsize=(10, 8))
    ax.plot((1-beta)*bsim[0, :] + csim[0, :], color="k")
    ax.plot((1-beta)*bsim.T + csim.T, color="k", alpha=.1)
    fig.suptitle("Cointegration of Assets and Consumption")
    ax.set_xlabel("t")
    ax.set_ylabel("")
    return fig

# Plot the cointegrating residual for the first example.
fig = cointegration_figure(bsim0, csim0)
fig.show()
"""
Explanation: First example
Here is what is going on in the above graphs.
Because we have set $y_{-1} = y_{-2} = 0$, nonfinancial income $y_t$ starts far below its stationary mean
$\mu_{y, \infty}$ and rises early in each simulation.
To help interpret the behavior above graph, recall that we can represent the optimal decision rule for consumption
in terms of the co-integrating relationship
$$ (1-\beta) b_t + c_t = (1-\beta) E_t \sum_{j=0}^\infty \beta^j y_{t+j}, $$
For our simulation, we have set initial conditions $b_0 = y_{-1} = y_{-2} = 0$ (please see the code above).
So at time $0$ we have
$$ c_0 = (1-\beta) E_0 \sum_{t=0}^\infty \beta^j y_{t} . $$
This tells us that consumption starts at the value of an annuity from the expected discounted value of nonfinancial
income. To support that level of consumption, the consumer borrows a lot early on, building up substantial debt.
In fact, he or she incurs so much debt that eventually, in the stochastic steady state, he consumes less each period than his income. He uses the gap between consumption and income mostly to service the interest payments due on his debt.
Thus, when we look at the panel of debt in the accompanying graph, we see that this is a group of ex ante identical people each of whom starts with zero debt. All of them accumulate debt in anticipation of rising nonfinancial income. They expect their nonfinancial income to rise toward the invariant distribution of income, a consequence of our having started them at $y_{-1} = y_{-2} = 0$.
Illustration of cointegration
The LQ permanent income model is a good one for illustrating the concept of cointegration.
The following figure plots realizations of the left side of
$$ (1-\beta) b_t + c_t = (1-\beta) E_t \sum_{j=0}^\infty \beta^j y_{t+j}, \quad (12) $$
which is called the cointegrating residual.
Notice that it equals the right side, namely, $(1-\beta) E_t \sum_{j=0}^\infty \beta^j y_{t+j}$,
which equals an annuity payment on the expected present value of future income $E_t \sum_{j=0}^\infty \beta^j y_{t+j}$.
Early along a realization, $c_t$ is approximately constant while $(1-\beta) b_t$ and $(1-\beta) E_t \sum_{j=0}^\infty \beta^j y_{t+j}$ both rise markedly as the household's present value of income and borrowing rise pretty much together.
Note: This example illustrates the following point: the definition of cointegration implies that the cointegrating residual is asymptotically covariance stationary, not covariance stationary. The cointegrating residual for the specification with zero income and zero debt initially has a notable transient component that dominates its behavior early in the sample. By specifying different initial conditions, we shall remove this transient in our second example to be presented below.
End of explanation
"""
# Second example: b starts at 0.0 and y is drawn from the invariant
# (Bewley) distribution, so consumers are ex ante heterogeneous.
out = income_consumption_debt_series(A_LSS, C_LSS, G_LSS, mxbewley, sxbewley)
bsimb, csimb, ysimb = out[:3]
cons_meanb, cons_varb, debt_meanb, debt_varb = out[3:]
fig_0 = consumption_income_debt_figure(bsimb, csimb, ysimb)
fig_02 = consumption_debt_fanchart(csimb, cons_meanb, cons_varb,
                                   bsimb, debt_meanb, debt_varb)
fig = cointegration_figure(bsimb, csimb)
fig.show()
"""
Explanation: A "borrowers and lenders" closed economy
When we set $y_{-1} = y_{-2} = 0$ and $b_0 =0$ in the preceding exercise, we make debt "head north" early in the sample. Average debt rises and approaches an asymptote.
We can regard these as outcomes of a ``small open economy'' that borrows from abroad at the fixed gross interest rate $R$ in anticipation of rising incomes.
So with the economic primitives set as above, the economy converges to a steady state in which there is an excess aggregate supply of risk-free loans at a gross interest rate of $R$. This excess supply is filled by ``foreign lenders'' willing to make those loans.
We can use virtually the same code to rig a "poor man's Bewley model" in the following way.
as before, we start everyone at $b_0 = 0$.
But instead of starting everyone at $y_{-1} = y_{-2} = 0$, we draw $\begin{bmatrix} y_{-1} \cr y_{-2}
\end{bmatrix}$ from the invariant distribution of the ${y_t}$ process.
This rigs a closed economy in which people are borrowing and lending with each other at a gross risk-free
interest rate of $R = \beta^{-1}$. Here within the group of people being analyzed, risk-free loans are in zero excess supply. We have arranged primitives so that $R = \beta^{-1}$ clears the market for risk-free loans at zero aggregate excess supply. There is no need for foreigners to lend to our group.
The following graphs confirm the following outcomes:
as before, the consumption distribution spreads out over time. But now there is some initial dispersion because there is ex ante heterogeneity in the initial draws of $\begin{bmatrix} y_{-1} \cr y_{-2}
\end{bmatrix}$.
as before, the cross-section distribution of debt spreads out over time.
Unlike before, the average level of debt stays at zero, reflecting that this is a closed borrower-and-lender economy.
Now the cointegrating residual seems stationary, and not just asymptotically stationary.
End of explanation
"""
|
pbstark/DKDHondt14 | danmark14EU.ipynb | mit | from __future__ import division
from __future__ import print_function
import math
import numpy as np
def dHondt(partyTotals, seats, divisors):
    '''
    Allocate <seats> seats to parties according to <partyTotals> votes,
    using highest-averages proportional allocation with <divisors>.
    For d'Hondt, the divisors are 1, 2, 3, ...

    Input:
        partyTotals: list of total votes by party
        seats: total number of seats to allocate
        divisors: divisors for proportional allocation

    Returns:
        partySeats: number of seats for each party
        seated: list of tuples--parties with at least one seat,
                number of votes that party got,
                and divisor for the last seated in the party
        notSeated: list of tuples--parties with at least one lost seat,
                number of votes that party got,
                and divisor for the first non-seated in the party
        lastSeated: smallest quotient that wins a seat
        quotients: matrix of votes/divisor for each pseudocandidate

    Raises:
        ValueError if the last seat is tied.
    '''
    # Quotient table: one row per party, one column per divisor position.
    quotients = np.array([partyTotals, ] * seats).T / divisors.astype(float)
    ranked = np.sort(np.ravel(quotients))
    lastSeated = ranked[-seats]
    # A tie for the final seat cannot be broken by this rule.
    if lastSeated == ranked[-(seats + 1)]:
        raise ValueError("Tied contest for the last seat!")
    # Winners: every pseudocandidate whose quotient meets the cutoff.
    winners = np.where(quotients >= lastSeated)
    partySeats = np.bincount(winners[0], minlength=len(partyTotals))
    withSeats = np.nonzero(partySeats)[0]
    seated = list(zip(withSeats,
                      partyTotals[withSeats],
                      divisors[partySeats[withSeats] - 1]))
    # Losers: every pseudocandidate below the cutoff.
    losers = np.where(quotients < lastSeated)
    partyLosses = np.bincount(losers[0], minlength=len(partyTotals))
    withLosses = np.nonzero(partyLosses)[0]
    notSeated = list(zip(withLosses,
                         partyTotals[withLosses],
                         divisors[partySeats[withLosses]]))
    return partySeats, seated, notSeated, lastSeated, quotients
def uMax(win, lose):
    '''
    Finds the upper bound u on the MICRO for the contest.

    win and lose are lists of triples: [party, tally(party), divisor].
    The divisor for win is the largest divisor for any seat the party won;
    the divisor for lose is the smallest divisor for any seat the party lost.
    See Stark and Teague, 2014, equation 7.

    Input:
        win:  list of triples--party, tally(party), divisor
        lose: list of triples--party, tally(party), divisor
    Returns:
        maximum possible relative overstatement for any ballot
    '''
    bound = 0.0
    for winner in win:
        for loser in lose:
            if winner[0] == loser[0]:
                continue  # never compare a party with itself
            cand = (loser[2] + winner[2]) / (loser[2]*winner[1] - winner[2]*loser[1])
            if cand > bound:
                bound = cand
    return bound
def minSampleSize(ballots, u, gamma=0.95, alpha=0.1):
    '''
    Find the smallest sample size for a ballot-level comparison audit
    using risk limit alpha and cushion gamma in (0,1), i.e. the smallest
    integer n solving

        1/alpha = (gamma/(1 - 1/(ballots*u)) + 1 - gamma)**n

    Input:
        ballots: number of ballots cast in the contest
        u:       upper bound on overstatement per ballot
        gamma:   hedge against finding a ballot that attains the upper
                 bound; larger values give less protection
        alpha:   risk limit
    '''
    growth = gamma/(1.0 - 1.0/(ballots*u)) + 1.0 - gamma
    return math.ceil(math.log(1.0/alpha) / math.log(growth))
"""
Explanation: Risk-limiting audit code for Proportional Representation via Highest Averages
Code and demo from Verifiable European Elections: Risk-limiting
Audits for D’Hondt and its relatives by
Philip B. Stark and
Vanessa Teague, March 26, 2015
Routines:
* dHondt(partyTotals, seats, divisors)
* uMax(win, lose)
* minSampleSize(ballots, u, gamma=0.95, alpha=0.1)
Demonstrated on Denmark's 2014 European Union Parliamentary election
which uses an open list proportional representation voting method, with seats allocated via the highest averages method.
Parties can form coalitions, in which case first seats are allocated across the coalitions, and then from the the seats for each coalition, the parties within the coalition are allocated seats.
Works with both Python 2.7 and Python 3
How the ballots look and are marked (for party, for candidate), from Altinget.dk: This is how the European Parliament will change EU elections
<br>
<img style="float: left;" src="https://www.altinget.dk/images/article/145258/16579.jpg">
End of explanation
"""
# Final 2014 Danish EU Parliamentary election results from http://www.dst.dk/valg/Valg1475795/valgopg/valgopgHL.htm
# There were two coalitions: (A,B,F) and (C,V)
#
# Official results by party
#
A = 435245
B = 148949
C = 208262
F = 249305
I = 65480
N = 183724
O = 605889
V = 379840
Ballots = 2332217 # includes invalid and blank ballots
nSeats = 13 # seats to allocate
#
# allocate seats to coalitions
#
coalitionTotals = np.array([A+B+F, C+V, I, N, O]) # for coalitions
coalitionSeats, coalitionSeated, coalitionNotSeated, coalitionLastSeated, coalitionPCs = dHondt(coalitionTotals, nSeats, np.arange(1, nSeats+1))
print('A+B+F, C+V, I, N, O:', coalitionSeats)
#
# allocate seats within coalitions
#
nABFSeats = coalitionSeats[0]
nCVSeats = coalitionSeats[1]
ABFSeats, ABFSeated, ABFNotSeated, ABFLastSeated, ABFPCs = dHondt(np.array([A, B, F]), nABFSeats, np.arange(1, nABFSeats+1))
CVSeats, CVSeated, CVNotSeated, CVLastSeated, CVPCs = dHondt(np.array([C, V]), nCVSeats, np.arange(1, nCVSeats+1))
#
print('A, B, F:', ABFSeats, '; C, V:', CVSeats)
#
ASeats = ABFSeats[0]
BSeats = ABFSeats[1]
CSeats = CVSeats[0]
FSeats = ABFSeats[2]
ISeats = coalitionSeats[2]
NSeats = coalitionSeats[3]
OSeats = coalitionSeats[4]
VSeats = CVSeats[1]
allSeats = [ASeats, BSeats, CSeats, FSeats, ISeats, NSeats, OSeats, VSeats]
print('---------------\nSeats to parties A, B, C, F, I, N, O, V: ', allSeats)
print('Seated coalitions, votes, divisor:', coalitionSeated)
print('Non-Seated coalitions, votes, divisor:', coalitionNotSeated)
"""
Explanation: Re-tally election results
End of explanation
"""
gamma = 0.95 # tuning constant in the Kaplan-Wald method
alpha = 0.001 # risk limit
u = uMax(coalitionSeated, coalitionNotSeated)
print("Minimum ballot-level comparison sample size = %d\n for max total overstatement = %.2f, u = %.4g, gamma = %.2f, alpha = %.3f" %
(minSampleSize(Ballots, u, gamma, alpha), Ballots*u, u, gamma, alpha))
"""
Explanation: Audit: initial sample size
End of explanation
"""
|
chengsoonong/crowdastro | notebooks/106-passive.ipynb | mit | import astropy.io.ascii as asc, numpy, h5py, sklearn.linear_model, crowdastro.crowd.util, pickle, scipy.spatial
import matplotlib.pyplot as plt
%matplotlib inline
# Pre-computed train/test splits of ATLAS radio objects, partitioned into
# the "compact" and "resolved" subsets of RGZ.
with open('/Users/alger/data/Crowdastro/sets_atlas.pkl', 'rb') as f:
    atlas_sets = pickle.load(f)
atlas_sets_compact = atlas_sets['RGZ & compact']
atlas_sets_resolved = atlas_sets['RGZ & resolved']
# Matching splits of SWIRE (infrared) objects.
with open('/Users/alger/data/Crowdastro/sets_swire.pkl', 'rb') as f:
    swire_sets = pickle.load(f)
swire_sets_compact = swire_sets['RGZ & compact']
swire_sets_resolved = swire_sets['RGZ & resolved']
# SWIRE feature vectors, names, sky coordinates, and RGZ labels.
with h5py.File('/Users/alger/data/Crowdastro/swire.h5') as f:
    swire_features = f['features'].value
with h5py.File('/Users/alger/data/Crowdastro/crowdastro-swire.h5') as f:
    swire_names = [i.decode('ascii') for i in f['/swire/cdfs/string'].value]
    swire_coords = f['/swire/cdfs/numeric'][:, :2]  # first two columns; presumably RA/dec -- TODO confirm
swire_labels = {i['swire']: i['rgz_label'] for i in asc.read('/Users/alger/data/SWIRE/all_labels.csv')}
table = asc.read('/Users/alger/data/Crowdastro/one-table-to-rule-them-all.tbl')
# KDTree over SWIRE coordinates for fast radius queries below.
swire_tree = scipy.spatial.KDTree(swire_coords)
"""
Explanation: Passive
Plots a passive learning curve w.r.t. ATLAS objects. Trained, tested on RGZ, split on compact/resolved. Testing on RGZ instead of Norris because we believe it to be reasonably accurate and it's also a lot bigger; if we want a good idea of how this curve levels out we really want to use as much data as possible. Splitting on compact/resolved because we expect compact to level out a lot faster (possibly very fast indeed).
End of explanation
"""
def test_on_atlas_sets(atlas_sets, swire_sets):
    """
    Passive learning curve: for each train/test split, train logistic
    regression on growing random subsets of the ATLAS training objects
    and evaluate balanced accuracy on the corresponding SWIRE test set.

    Parameters
    ----------
    atlas_sets : list of (train, test) splits of ATLAS object keys
    swire_sets : list of (train, test) splits of SWIRE object indices

    Returns
    -------
    n_atlas : list of training-subset sizes (one entry per size per split)
    n_swire : list of the number of nearby SWIRE objects trained on
    bas : list of balanced accuracies on the test SWIRE objects

    Notes
    -----
    Relies on the module-level globals `table`, `swire_tree`,
    `swire_features`, `swire_names`, and `swire_labels` loaded above.
    """
    # 10 subset sizes, log-spaced between 5 and the full training-set size.
    subset_sizes = numpy.logspace(numpy.log2(5),
                                  numpy.log2(len(atlas_sets[0][0])),
                                  base=2, num=10)
    # Key -> catalogue row lookup is the same for every split; build it once
    # (the original rebuilt it inside the loop for each split).
    key_to_row = {row['Key']: row for row in table}
    n_atlas = []
    n_swire = []
    bas = []
    for (train, test), (_, test_swire) in zip(atlas_sets, swire_sets):
        for subset_size in subset_sizes:
            print(subset_size, end=' ')
            # Subsample train.
            subset_size = int(subset_size)
            train_subset = list(train)
            numpy.random.shuffle(train_subset)
            train_subset = train_subset[:subset_size]
            # Get coords of the sampled ATLAS components.
            ras = [key_to_row[k]['Component RA (Franzen)'] for k in train_subset]
            decs = [key_to_row[k]['Component DEC (Franzen)'] for k in train_subset]
            coords = list(zip(ras, decs))
            # Find SWIRE objects within 1 arcminute of any sampled component.
            nearby = sorted({int(i) for i in numpy.concatenate(swire_tree.query_ball_point(coords, 1 / 60))})
            # Train on the features of the nearby SWIRE objects.
            features = swire_features[nearby]
            labels = [swire_labels[swire_names[n]] == 'True' for n in nearby]
            lr = sklearn.linear_model.LogisticRegression(class_weight='balanced', C=1e10)
            lr.fit(features, labels)
            # Compute balanced accuracy on the held-out SWIRE test objects.
            test_labels = [swire_labels[swire_names[n]] == 'True' for n in test_swire]
            test_features = swire_features[test_swire]
            acc = crowdastro.crowd.util.balanced_accuracy(test_labels, lr.predict(test_features))
            n_atlas.append(int(subset_size))
            n_swire.append(len(nearby))
            bas.append(acc)
        print()
    return n_atlas, n_swire, bas
# Learning curve for the compact subset (accuracies left as fractions here).
n_atlas, n_swire, bas = test_on_atlas_sets(atlas_sets_compact, swire_sets_compact)
plt.scatter(n_atlas, bas, alpha=0.7)
plt.title('Passive Learning Curve — Compact')
plt.xlabel('Number of radio objects')
plt.ylabel('Balanced accuracy')
plt.xscale('log')
"""
Explanation: Compact
End of explanation
"""
# Learning curve for the resolved subset.
n_atlas_resolved, n_swire_resolved, bas_resolved = test_on_atlas_sets(atlas_sets_resolved, swire_sets_resolved)
plt.scatter(n_atlas_resolved, bas_resolved, alpha=0.7)
plt.title('Passive Learning Curve — Resolved')
plt.xlabel('Number of radio objects')
plt.ylabel('Balanced accuracy')
plt.xscale('log')
# Overlay both learning curves on one set of axes, scaled to percent.
plt.scatter(n_atlas_resolved, numpy.array(bas_resolved) * 100, alpha=0.7, color='red', label='Resolved')
plt.scatter(n_atlas, numpy.array(bas) * 100, alpha=0.7, color='green', label='Compact')
plt.title('Accuracy against number of objects in training set')
plt.xlabel('Number of radio objects')
plt.ylabel('Balanced accuracy (%)')
plt.xscale('log')
plt.legend()
# Group the compact accuracies by training-set size so we can plot
# mean +/- one-standard-deviation bands.
n_atlas_to_acc_compact = {n: [] for n in n_atlas}
for n, ba in zip(n_atlas, bas):
    n_atlas_to_acc_compact[n].append(ba)
xs_compact = []
ys_compact = []
yerr_compact = []
for n in sorted(set(n_atlas)):
    xs_compact.append(n)
    ys_compact.append(numpy.mean(n_atlas_to_acc_compact[n]))
    yerr_compact.append(numpy.std(n_atlas_to_acc_compact[n]))
xs_compact = numpy.array(xs_compact)
ys_compact = numpy.array(ys_compact)
yerr_compact = numpy.array(yerr_compact)
ylow_compact = ys_compact - yerr_compact    # lower band (mean - std)
yhigh_compact = ys_compact + yerr_compact   # upper band (mean + std)
# Same aggregation for the resolved accuracies.
n_atlas_to_acc_resolved = {n: [] for n in n_atlas_resolved}
for n, ba in zip(n_atlas_resolved, bas_resolved):
    n_atlas_to_acc_resolved[n].append(ba)
xs_resolved = []
ys_resolved = []
yerr_resolved = []
for n in sorted(set(n_atlas_resolved)):
    xs_resolved.append(n)
    ys_resolved.append(numpy.mean(n_atlas_to_acc_resolved[n]))
    yerr_resolved.append(numpy.std(n_atlas_to_acc_resolved[n]))
xs_resolved = numpy.array(xs_resolved)
ys_resolved = numpy.array(ys_resolved)
yerr_resolved = numpy.array(yerr_resolved)
ylow_resolved = ys_resolved - yerr_resolved
yhigh_resolved = ys_resolved + yerr_resolved
# Final figure: mean balanced accuracy with +/- one-std bands.
# FIX: the y axis is labelled in percent, so scale the accuracies
# (fractions in [0, 1]) by 100, matching the combined scatter plot above.
plt.plot(xs_compact, ys_compact * 100, alpha=1, color='green', label='compact', marker='x')
plt.fill_between(xs_compact, ylow_compact * 100, yhigh_compact * 100, alpha=.2, color='green')
plt.plot(xs_resolved, ys_resolved * 100, alpha=1, color='blue', label='resolved', marker='x')
plt.fill_between(xs_resolved, ylow_resolved * 100, yhigh_resolved * 100, alpha=.2, color='blue')
plt.title('Accuracy against number of objects in training set')
plt.xlabel('Number of radio objects')
plt.ylabel('Balanced accuracy (%)')
plt.xscale('log')
plt.legend()
plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/passive.pdf')
"""
Explanation: Resolved
End of explanation
"""
|
cgpotts/cs224u | finetuning.ipynb | apache-2.0 | __author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2022"
"""
Explanation: Bringing contextual word representations into your models
End of explanation
"""
import os
from sklearn.metrics import classification_report
import torch
import torch.nn as nn
import transformers
from transformers import BertModel, BertTokenizer
from torch_shallow_neural_classifier import TorchShallowNeuralClassifier
from torch_rnn_classifier import TorchRNNModel
from torch_rnn_classifier import TorchRNNClassifier
from torch_rnn_classifier import TorchRNNClassifierModel
from torch_rnn_classifier import TorchRNNClassifier
import sst
import utils
utils.fix_random_seeds()  # seed all RNGs for reproducible runs
SST_HOME = os.path.join("data", "sentiment")  # root of the SST data distribution
"""
Explanation: Contents
Overview
General set-up
Hugging Face BERT models and tokenizers
BERT featurization with Hugging Face
Simple feed-forward experiment
A feed-forward experiment with the sst module
An RNN experiment with the sst module
BERT fine-tuning with Hugging Face
HfBertClassifier
HfBertClassifier experiment
Overview
This notebook provides a basic introduction to using pre-trained BERT representations with the Hugging Face library. It is meant as a practical companion to our lecture on contextual word representations. The goal of this notebook is just to help you use these representations in your own work.
If you haven't already, I encourage you to review the notebook vsm_04_contextualreps.ipynb before working with this one. That notebook covers the fundamentals of these models; this one dives into the details more quickly.
A number of the experiments in this notebook are resource-intensive. I've included timing information for the expensive steps, to give you a sense for how long things are likely to take. I ran this notebook on a laptop with a single NVIDIA RTX 2080 GPU.
General set-up
The following are requirements that you'll already have met if you've been working in this repository. As you can see, we'll use the Stanford Sentiment Treebank for illustrations, and we'll try out a few different deep learning models.
End of explanation
"""
# Quiet the transformers library's verbose logging (errors only).
transformers.logging.set_verbosity_error()
"""
Explanation: The transformers library does a lot of logging. To avoid ending up with a cluttered notebook, I am changing the logging level. You might want to skip this as you scale up to building production systems, since the logging is very good – it gives you a lot of insights into what the models and code are doing.
End of explanation
"""
weights_name = 'bert-base-cased'  # pretrained weights; other options in the Hugging Face model hub
"""
Explanation: Hugging Face BERT models and tokenizers
We'll illustrate with the BERT-base cased model:
End of explanation
"""
# Tokenizer and model must match each other and the chosen pretrained weights.
bert_tokenizer = BertTokenizer.from_pretrained(weights_name)
bert_model = BertModel.from_pretrained(weights_name)
"""
Explanation: There are lots other options for pretrained weights. See this Hugging Face directory.
Next, we specify a tokenizer and a model that match both each other and our choice of pretrained weights:
End of explanation
"""
# Small batch of texts for the illustrations below; [SEP] marks a text pair.
example_texts = [
    "Encode sentence 1. [SEP] And sentence 2!",
    "Bert knows Snuffleupagus"]
"""
Explanation: For modeling (as opposed to creating static representations), we will mostly process examples in batches – generally very small ones, as these models consume a lot of memory. Here's a small batch of texts to use as the starting point for illustrations:
End of explanation
"""
# Encode the batch: add [CLS]/[SEP], pad to the longest example, and
# return an attention mask so padding can be ignored downstream.
example_ids = bert_tokenizer.batch_encode_plus(
    example_texts,
    add_special_tokens=True,
    return_attention_mask=True,
    padding='longest')
example_ids.keys()
"""
Explanation: We will often need to pad (and perhaps truncate) token lists so that we can work with fixed-dimensional tensors: The batch_encode_plus has a lot of options for doing this:
End of explanation
"""
example_ids['input_ids']  # token indices; the second example ends in pad tokens
"""
Explanation: The token_type_ids is used for multi-text inputs like NLI. The 'input_ids' field gives the indices for each of the two examples:
End of explanation
"""
example_ids['attention_mask']  # 1 for real tokens, 0 for padding
"""
Explanation: Notice that the final two tokens of the second example are pad tokens.
For fine-tuning, we want to avoid attending to padded tokens. The 'attention_mask' captures the needed mask, which we'll be able to feed directly to the pretrained BERT model:
End of explanation
"""
X_example = torch.tensor(example_ids['input_ids'])
X_example_mask = torch.tensor(example_ids['attention_mask'])
# Featurizing only -- no gradients needed, so skip building the autograd graph.
with torch.no_grad():
    reps = bert_model(X_example, attention_mask=X_example_mask)
"""
Explanation: Finally, we can run these indices and masks through the pretrained model:
End of explanation
"""
reps.pooler_output.shape  # (batch_size, hidden_dim): one vector per example
"""
Explanation: Hugging Face BERT models create a special pooler_output representation that is the final representation above the [CLS] extended with a single layer of parameters:
End of explanation
"""
reps.last_hidden_state.shape  # (batch_size, max_len, hidden_dim): one vector per token
"""
Explanation: We have two examples, each representented by a single vector of dimension 768, which is $d_{model}$ for BERT base using the notation from the original Transformers paper. This is an easy basis for fine-tuning, as we will see.
We can also access the final output for each state:
End of explanation
"""
def bert_phi(text):
    """Featurize `text` with frozen BERT: return the final-layer hidden
    states as a (num_tokens, hidden_dim) numpy array, including the
    special-token positions. Uses the module-level `bert_tokenizer` and
    `bert_model`."""
    input_ids = bert_tokenizer.encode(text, add_special_tokens=True)
    X = torch.tensor([input_ids])  # batch of one
    with torch.no_grad():          # BERT parameters stay frozen
        reps = bert_model(X)
    return reps.last_hidden_state.squeeze(0).numpy()
"""
Explanation: Here, we have 2 examples, each padded to the length of the longer one (12), and each of those representations has dimension 768. These representations can be used for sequence modeling, or pooled somehow for simple classifiers.
Those are all the essential ingredients for working with these parameters in Hugging Face. Of course, the library has a lot of other functionality, but the above suffices to featurize and to fine-tune.
BERT featurization with Hugging Face
To start, we'll use the Hugging Face interfaces just to featurize examples to create inputs to a separate model. In this setting, the BERT parameters are frozen.
End of explanation
"""
def bert_classifier_phi(text):
    """Represent `text` by the final-layer output above [CLS] (index 0)."""
    reps = bert_phi(text)
    #return reps.mean(axis=0)  # Another good, easy option: mean-pool all tokens.
    return reps[0]
"""
Explanation: Simple feed-forward experiment
For a simple feed-forward experiment, we can get the representation of the [CLS] tokens and use them as the inputs to a shallow neural network:
End of explanation
"""
# SST train and dev splits.
train = sst.train_reader(SST_HOME)
dev = sst.dev_reader(SST_HOME)
"""
Explanation: Next we read in the SST train and dev splits:
End of explanation
"""
# Separate inputs (raw sentences) from outputs (labels).
X_str_train = train.sentence.values
y_train = train.label.values
X_str_dev = dev.sentence.values
y_dev = dev.label.values
"""
Explanation: Split the input/output pairs out into separate lists:
End of explanation
"""
# Featurize every example up front -- the slowest step in this experiment.
%time X_train = [bert_classifier_phi(text) for text in X_str_train]
%time X_dev = [bert_classifier_phi(text) for text in X_str_dev]
"""
Explanation: In the next step, we featurize all of the examples. These steps are likely to be the slowest in these experiments:
End of explanation
"""
# Shallow classifier on top of the frozen [CLS] features.
model = TorchShallowNeuralClassifier(
    early_stopping=True,
    hidden_dim=300)
%time _ = model.fit(X_train, y_train)
preds = model.predict(X_dev)
print(classification_report(y_dev, preds, digits=3))
"""
Explanation: Now that all the examples are featurized, we can fit a model and evaluate it:
End of explanation
"""
def fit_shallow_network(X, y):
    """Model-fitting function for sst.experiment: a shallow network with
    early stopping, trained on pre-computed BERT [CLS] features."""
    mod = TorchShallowNeuralClassifier(
        hidden_dim=300,
        early_stopping=True)
    mod.fit(X, y)
    return mod

%%time
_ = sst.experiment(
    sst.train_reader(SST_HOME),
    bert_classifier_phi,
    fit_shallow_network,
    assess_dataframes=sst.dev_reader(SST_HOME),
    vectorize=False)  # Pass in the BERT reps directly!
"""
Explanation: A feed-forward experiment with the sst module
It is straightforward to conduct experiments like the above using sst.experiment, which will enable you to do a wider range of experiments without writing or copy-pasting a lot of code.
End of explanation
"""
def fit_rnn(X, y):
    """Model-fitting function for sst.experiment: an RNN that consumes
    sequences of BERT hidden states, so no embedding layer or vocabulary
    is needed."""
    mod = TorchRNNClassifier(
        vocab=[],
        early_stopping=True,
        use_embedding=False)  # Pass in the BERT hidden states directly!
    mod.fit(X, y)
    return mod

%%time
_ = sst.experiment(
    sst.train_reader(SST_HOME),
    bert_phi,
    fit_rnn,
    assess_dataframes=sst.dev_reader(SST_HOME),
    vectorize=False)  # Pass in the BERT hidden states directly!
"""
Explanation: An RNN experiment with the sst module
We can also use BERT representations as the input to an RNN. There is just one key change from how we used these models before:
Previously, we would feed in lists of tokens, and they would be converted to indices into a fixed embedding space. This presumes that all words have the same representation no matter what their context is.
With BERT, we skip the embedding entirely and just feed in lists of BERT vectors, which means that the same word can be represented in different ways.
TorchRNNClassifier supports this via use_embedding=False. In turn, you needn't supply a vocabulary:
End of explanation
"""
class HfBertClassifierModel(nn.Module):
    """BERT plus a single linear classification layer over the
    `pooler_output` (the transformed [CLS] representation). All BERT
    parameters are trainable, so fitting this module fine-tunes BERT."""

    def __init__(self, n_classes, weights_name='bert-base-cased'):
        """
        Parameters
        ----------
        n_classes : int
            Number of output classes.
        weights_name : str
            Hugging Face pretrained-weights identifier.
        """
        super().__init__()
        self.n_classes = n_classes
        self.weights_name = weights_name
        self.bert = BertModel.from_pretrained(self.weights_name)
        self.bert.train()  # ensure BERT's parameters update during training
        self.hidden_dim = self.bert.embeddings.word_embeddings.embedding_dim
        # The only new parameters -- the classifier:
        self.classifier_layer = nn.Linear(
            self.hidden_dim, self.n_classes)

    def forward(self, indices, mask):
        """Return (batch_size, n_classes) logits for token `indices`
        with attention `mask` (1 = real token, 0 = padding)."""
        reps = self.bert(
            indices, attention_mask=mask)
        return self.classifier_layer(reps.pooler_output)
"""
Explanation: BERT fine-tuning with Hugging Face
The above experiments are quite successful – BERT gives us a reliable boost compared to other methods we've explored for the SST task. However, we might expect to do even better if we fine-tune the BERT parameters as part of fitting our SST classifier. To do that, we need to incorporate the Hugging Face BERT model into our classifier. This too is quite straightforward.
HfBertClassifier
The most important step is to create an nn.Module subclass that has, for its parameters, both the BERT model and parameters for our own classifier. Here we define a very simple fine-tuning set-up in which some layers built on top of the output corresponding to [CLS] are used as the basis for the SST classifier:
End of explanation
"""
class HfBertClassifier(TorchShallowNeuralClassifier):
    """Fine-tuning BERT classifier that reuses the training/prediction
    machinery of TorchShallowNeuralClassifier; only `build_graph` and
    `build_dataset` are overridden."""

    def __init__(self, weights_name, *args, **kwargs):
        self.weights_name = weights_name
        self.tokenizer = BertTokenizer.from_pretrained(self.weights_name)
        super().__init__(*args, **kwargs)
        self.params += ['weights_name']  # expose for get_params/set_params

    def build_graph(self):
        """Return the computation graph: BERT plus a classifier layer."""
        return HfBertClassifierModel(self.n_classes_, self.weights_name)

    def build_dataset(self, X, y=None):
        """
        Tokenize raw texts `X` (padding to the longest example) and
        bundle the token indices and attention masks -- plus label
        indices when `y` is given (training mode) -- into a
        TensorDataset.
        """
        data = self.tokenizer.batch_encode_plus(
            X,
            max_length=None,
            add_special_tokens=True,
            padding='longest',
            return_attention_mask=True)
        indices = torch.tensor(data['input_ids'])
        mask = torch.tensor(data['attention_mask'])
        if y is None:
            # Prediction mode: no labels available.
            dataset = torch.utils.data.TensorDataset(indices, mask)
        else:
            # Training mode: map string labels to contiguous class indices.
            self.classes_ = sorted(set(y))
            self.n_classes_ = len(self.classes_)
            class2index = dict(zip(self.classes_, range(self.n_classes_)))
            y = [class2index[label] for label in y]
            y = torch.tensor(y)
            dataset = torch.utils.data.TensorDataset(indices, mask, y)
        return dataset
"""
Explanation: As you can see, self.bert does the heavy-lifting: it reads in all the pretrained BERT parameters, and I've specified self.bert.train() just to make sure that these parameters can be updated during our training process.
In forward, self.bert is used to process inputs, and then pooler_output is fed into self.classifier_layer. Hugging Face has already added a layer on top of the actual output for [CLS], so we can specify the model as
$$
\begin{align}
[h_{1}, \ldots, h_{n}] &= \text{BERT}([x_{1}, \ldots, x_{n}]) \\
h &= \tanh(h_{1}W_{hh} + b_{h}) \\
y &= \textbf{softmax}(hW_{hy} + b_{y})
\end{align}
$$
for a tokenized input sequence $[x_{1}, \ldots, x_{n}]$.
The Hugging Face documentation somewhat amusingly says, of pooler_output,
This output is usually not a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence.
which is entirely reasonable, but it will require more resources, so we'll do the simpler thing here.
For the training and prediction interface, we can subclass TorchShallowNeuralClassifier so that we don't have to write any of our own data-handling, training, or prediction code. The central changes are using HfBertClassifierModel in build_graph and processing the data with batch_encode_plus.
End of explanation
"""
def bert_fine_tune_phi(text):
    """Identity feature function: hand the raw text straight to the model."""
    return text
def fit_hf_bert_classifier_with_hyperparameter_search(X, y):
    """Grid-search a few fine-tuning settings and return the best model."""
    # Base model: tiny batches to keep GPU memory in check, and a single
    # training iteration per grid point for speed. The early-stopping
    # settings only matter for the final evaluation run.
    base_model = HfBertClassifier(
        weights_name='bert-base-cased',
        batch_size=8,
        max_iter=1,
        n_iter_no_change=5,
        early_stopping=True)
    grid = {
        'gradient_accumulation_steps': [1, 4, 8],
        'eta': [0.00005, 0.0001, 0.001],
        'hidden_dim': [100, 200, 300]}
    return utils.fit_classifier_with_hyperparameter_search(
        X, y, base_model, cv=3, param_grid=grid)
%%time
# Cross-validated hyperparameter search on the SST train set, scored on
# the dev set. vectorize=False because the phi function returns raw text
# for the model's own tokenizer rather than feature vectors.
bert_classifier_xval = sst.experiment(
    sst.train_reader(SST_HOME),
    bert_fine_tune_phi,
    fit_hf_bert_classifier_with_hyperparameter_search,
    assess_dataframes=sst.dev_reader(SST_HOME),
    vectorize=False)  # Pass in the BERT hidden state directly!
"""
Explanation: HfBertClassifier experiment
That's it! Let's see how we do on the SST binary, root-only problem. Because fine-tuning is expensive, we'll conduct a modest hyperparameter search and run the model for just one epoch per setting evaluation, as we did when assessing NLI models.
End of explanation
"""
# Best estimator found by the hyperparameter search above.
optimized_bert_classifier = bert_classifier_xval['model']
# Remove the rest of the experiment results to clear out some memory:
del bert_classifier_xval
def fit_optimized_hf_bert_classifier(X, y):
    """Refit the tuned classifier with a full training budget.

    NOTE(review): this mutates and returns the module-level
    optimized_bert_classifier rather than a fresh copy.
    """
    optimized_bert_classifier.max_iter = 1000
    optimized_bert_classifier.fit(X, y)
    return optimized_bert_classifier
# Labeled SST test split for the final evaluation.
test_df = sst.sentiment_reader(
    os.path.join(SST_HOME, "sst3-test-labeled.csv"))
%%time
# Final test-set evaluation with the tuned model.
_ = sst.experiment(
    sst.train_reader(SST_HOME),
    bert_fine_tune_phi,
    fit_optimized_hf_bert_classifier,
    assess_dataframes=test_df,
    vectorize=False)  # Pass in the BERT hidden state directly!
"""
Explanation: And now on to the final test-set evaluation, using the best model from above:
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.13.1/examples/notebooks/generated/statespace_cycles.ipynb | bsd-3-clause | %matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from pandas_datareader.data import DataReader
# Monthly U.S. unemployment rate (UNRATE) from FRED, 1954 onward.
endog = DataReader('UNRATE', 'fred', start='1954-01-01')
# Attach the inferred (monthly) frequency so statsmodels accepts the index.
endog.index.freq = endog.index.inferred_freq
"""
Explanation: Trends and cycles in unemployment
Here we consider three methods for separating a trend and cycle in economic data. Supposing we have a time series $y_t$, the basic idea is to decompose it into these two components:
$$
y_t = \mu_t + \eta_t
$$
where $\mu_t$ represents the trend or level and $\eta_t$ represents the cyclical component. In this case, we consider a stochastic trend, so that $\mu_t$ is a random variable and not a deterministic function of time. Two of methods fall under the heading of "unobserved components" models, and the third is the popular Hodrick-Prescott (HP) filter. Consistent with e.g. Harvey and Jaeger (1993), we find that these models all produce similar decompositions.
This notebook demonstrates applying these models to separate trend from cycle in the U.S. unemployment rate.
End of explanation
"""
# lamb=129600 is the conventional HP smoothing parameter for monthly data.
hp_cycle, hp_trend = sm.tsa.filters.hpfilter(endog, lamb=129600)
"""
Explanation: Hodrick-Prescott (HP) filter
The first method is the Hodrick-Prescott filter, which can be applied to a data series in a very straightforward method. Here we specify the parameter $\lambda=129600$ because the unemployment rate is observed monthly.
End of explanation
"""
# UC-ARIMA: random-walk level plus an AR(4) cyclical component.
mod_ucarima = sm.tsa.UnobservedComponents(endog, 'rwalk', autoregressive=4)
# Here the powell method is used, since it achieves a
# higher loglikelihood than the default L-BFGS method
res_ucarima = mod_ucarima.fit(method='powell', disp=False)
print(res_ucarima.summary())
"""
Explanation: Unobserved components and ARIMA model (UC-ARIMA)
The next method is an unobserved components model, where the trend is modeled as a random walk and the cycle is modeled with an ARIMA model - in particular, here we use an AR(4) model. The process for the time series can be written as:
$$
\begin{align}
y_t & = \mu_t + \eta_t \\
\mu_{t+1} & = \mu_t + \epsilon_{t+1} \\
\phi(L) \eta_t & = \nu_t
\end{align}
$$
where $\phi(L)$ is the AR(4) lag polynomial and $\epsilon_t$ and $\nu_t$ are white noise.
End of explanation
"""
# UC: random-walk level plus an explicit damped stochastic cycle.
mod_uc = sm.tsa.UnobservedComponents(
    endog, 'rwalk',
    cycle=True, stochastic_cycle=True, damped_cycle=True,
)
# Here the powell method gets close to the optimum
res_uc = mod_uc.fit(method='powell', disp=False)
# but to get to the highest loglikelihood we do a
# second round using the L-BFGS method.
res_uc = mod_uc.fit(res_uc.params, disp=False)
print(res_uc.summary())
"""
Explanation: Unobserved components with stochastic cycle (UC)
The final method is also an unobserved components model, but where the cycle is modeled explicitly.
$$
\begin{align}
y_t & = \mu_t + \eta_t \\
\mu_{t+1} & = \mu_t + \epsilon_{t+1} \\
\eta_{t+1} & = \eta_t \cos \lambda_\eta + \eta_t^* \sin \lambda_\eta + \tilde \omega_t \qquad & \tilde \omega_t \sim N(0, \sigma_{\tilde \omega}^2) \\
\eta_{t+1}^* & = -\eta_t \sin \lambda_\eta + \eta_t^* \cos \lambda_\eta + \tilde \omega_t^* & \tilde \omega_t^* \sim N(0, \sigma_{\tilde \omega}^2)
\end{align}
$$
End of explanation
"""
# Compare the smoothed trend and cycle estimates from all three methods.
fig, axes = plt.subplots(2, figsize=(13,5));
axes[0].set(title='Level/trend component')
axes[0].plot(endog.index, res_uc.level.smoothed, label='UC')
# NOTE(review): legend says 'UC-ARIMA(2,0)' but the model above uses
# autoregressive=4 -- confirm the intended label.
axes[0].plot(endog.index, res_ucarima.level.smoothed, label='UC-ARIMA(2,0)')
axes[0].plot(hp_trend, label='HP Filter')
axes[0].legend(loc='upper left')
axes[0].grid()
axes[1].set(title='Cycle component')
axes[1].plot(endog.index, res_uc.cycle.smoothed, label='UC')
axes[1].plot(endog.index, res_ucarima.autoregressive.smoothed, label='UC-ARIMA(2,0)')
axes[1].plot(hp_cycle, label='HP Filter')
axes[1].legend(loc='upper left')
axes[1].grid()
fig.tight_layout();
"""
Explanation: Graphical comparison
The output of each of these models is an estimate of the trend component $\mu_t$ and an estimate of the cyclical component $\eta_t$. Qualitatively the estimates of trend and cycle are very similar, although the trend component from the HP filter is somewhat more variable than those from the unobserved components models. This means that relatively mode of the movement in the unemployment rate is attributed to changes in the underlying trend rather than to temporary cyclical movements.
End of explanation
"""
|
greenelab/GCB535 | 29_ML-II/ML2_svms_and_overfitting.ipynb | bsd-3-clause | import numpy as np
from sklearn import svm
from sklearn import preprocessing
# Define a useful helper function to read in our PCL files and store the gene names,
# matrix of values, and sample names
# We'll use this function later, but we don't need to dig into how it works here.
def read_dataset(filename):
    """Read a tab-delimited PCL expression file.

    The first line holds sample ids; each following line is one gene,
    with the gene id in column 0 and one expression value per sample
    after it. Each gene's values are z-scored (preprocessing.scale) so
    genes are comparable, and the matrix is transposed to samples x genes.

    Fix over the original: the file is opened with a ``with`` block so
    the handle is closed even if parsing raises (the original leaked the
    handle on any exception before the explicit close()).

    Returns a dict with keys 'genes' (list of gene ids), 'matrix'
    (numpy array, samples x genes) and 'samples' (list of sample ids).
    """
    gids = []           # gene ids, in file order
    genes_samples = []  # one z-scored row per gene (gene-major)
    with open(filename) as data_fh:
        samples = data_fh.readline().strip().split('\t')  # header: sample ids
        for line in data_fh:
            toks = line.strip().split('\t')
            gids.append(toks[0])  # gene id is the first column
            vals = [float(x) for x in toks[1:]]
            # Standardize this gene's expression values so genes are comparable
            zarray = preprocessing.scale(vals)
            genes_samples.append(zarray)
    # Because we want samples x genes instead of genes x samples, transpose
    samples_genes = np.transpose(np.array(genes_samples))
    return {'genes': gids, 'matrix': samples_genes, 'samples': samples}
# Use the function that we defined to read in our dataset
bric = read_dataset('../29_Data_ML-II/METABRIC_dataset.pcl')
# Now we need to figure out which samples in metabric are tumors and which are normal.
# We will store this in status_list (matching each example in the dataset), so that we
# can provide this to scikit learn's SVM implementation.
status = {} # hold tumor/normal status encoded as 1 (tumor)/2 (normal)
label_fh = open('tumor_normal_label.txt')
for line in label_fh:
    toks = line.strip().split()
    if toks[1] == 'Tumor':
        status[toks[0]] = 1
    elif toks[1] == 'Normal':
        status[toks[0]] = 2
# Align labels with the sample order of the expression matrix.
status_list = []
for sample in bric['samples']:
    status_list.append(status[sample])
# Now we're going to construct a classifier. First we need to set up our parameters
# (linear kernel; a very small C means very strong regularization).
svm_classifier = svm.SVC(C=0.000001, kernel='linear')
# Once our parameters are set, we can fit the classifier to our data
svm_classifier.fit(bric['matrix'], status_list)
# Once we have our classifier, we can apply it back to the examples and get our score
# Since this is binary classification. We get an accuracy.
# NOTE(review): this scores on the training data itself, so the accuracy is optimistic.
score = svm_classifier.score(bric['matrix'], status_list)
print("Training Accuracy: " + str(score))
"""
Explanation: Today we're going to walk through an example of predicting tumor and normal status directly from gene expression values. We'll be using the python package scikit learn to construct our SVM classifier. For machine learning, we highly recommend this package.
Lots of documentation is available:
http://scikit-learn.org/stable/documentation.html
We're going to be working on a support vector machine classifier. As we dig into the details, make sure you're referring to the documentation for more information:
http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
End of explanation
"""
## Load necessary Python packages
import numpy as np # numpy makes it convenient to load/modify matrices of data
import sklearn.linear_model as lm # this scikit learn module has code to fit a line
import matplotlib.pyplot as plt # this lets us plot our results
from sklearn.metrics import mean_squared_error # we use this to see how well our model fits data
%matplotlib inline
# This code will make our data by adding random noise to a linear relationship
# Simulate two variables x and y
# y=x+e, e is some noise
# 10 evenly spaced x values in [0, 2]; Gaussian noise with sd 0.5.
x = np.linspace(0., 2, 10)
y = x + 0.5*np.random.randn(len(x))
"""
Explanation: Congratulations! You've built your first SVM, and on training data it separates tumor data from normal data with over 90% accuracy! Now that we've done this with some biomedical data, let's take a step back and talk about things we should consider as we build a model.
Q1: What are our labels?
Q2: What are our features?
Q3: What are our examples?
Overfitting in machine learning
When you train a computer to build a model that describes data that you've seen, a challenge known as "overfitting" can arise. When fitting the model, we want to find a model that fits the data as well as possible. However, real data is noisy. The model that fits data we have with the least error may capture the main features of the data, but may also capture noise in the data that we don't intend to model. When a model fits noise in training data, we call this problem overfitting.
For example, imagine that a professor wants to test a group of students' knowledge of calculus. She gives the students previous exam questions and answers to study. However, in the final exam, she uses the same questions to test the students. Some of the students could do very well because they memorized answers to the questions even though they don't understand calculus. The professor realizes this problem and then gives the students a new set of questions to test them. The students who memorized all the answers to previous exam questions may fail the new exam because they have no idea how to solve the new problems. We would say that those students have "overfit" to training data.
How can overfitting be a problem with machine learning? Don't we want the model to fit the data as well as possible? The reason is we want a model that captures the features that will also exist in some new data. If the model fits the noise in the data, the model will perform poorly on new data sets!
Let's use simulations to illustrate the overfitting problem. We are going to simulate two variables x and y and we let y = x + e, where e is some noise. That is, y is a linear function of x.
End of explanation
"""
# This uses matplotlib to show points. You've seen a little bit of this before in the kmeans code
# We're using it for examples but you don't have to understand how this works.
# If you one day want to plot your results using python, you might want to keep this code
# as a reference.
# Scatter the first 100 simulated (x, y) points (here only 10 exist).
plt.figure(figsize=(8,6))
plt.scatter(x[:100], y[:100])
plt.xlabel("x")
plt.ylabel("y")
#plt.plot(x, y)
"""
Explanation: Let's plot the data. The code in the box below will do this. As we can see, the relation between x and y is linear but with some random noise.
End of explanation
"""
# You don't need to know how this code works. We're not going to focus on regression
# during this course. You may want to have it to refer to in the future.
### simple regression
lr = lm.LinearRegression()
lr.fit(x[:,np.newaxis], y);
y_lr = lr.predict(x[:, np.newaxis])
### multiple regression
# np.vander builds polynomial features x^0 ... x^9, so this fits
# y ~ x + x^2 + ... + x^9 (a very flexible, overfitting-prone model).
lrp = lm.LinearRegression()
lrp.fit(np.vander(x, N=10, increasing=True), y)
y_lrp = lrp.predict(np.vander(x, N=10, increasing=True))
# Dense grid for drawing a smooth prediction curve.
x_plot = np.linspace(0., 2, 1000)
y_plot = lrp.predict(np.vander(x_plot, N=10, increasing=True))
"""
Explanation: Next, we want to train linear regression models on x and use the models to predict y. The models we are going to use are:
1. A simple linear regression model: Y~X
2. A complex multiple regression model: Y ~ X + X^2 + X^3 + X^4 ... + X^10
We want to choose the model that will most accurately predict y.
Let's use scikit-learn to train these two models:
End of explanation
"""
# Overlay both fitted curves on the training points.
plt.figure(figsize=(8,6))
plt.scatter(x, y)
plt.plot(x, y_lr, 'g',label='Simple regression')
plt.title("Linear regression")
plt.plot(x_plot, y_plot,label='Multiple regression')
plt.legend(loc=2)
"""
Explanation: Let's plot the fitting results.
End of explanation
"""
# Training-set MSE of the simple (degree-1) model.
mean_squared_error(y, y_lr)
"""
Explanation: Let's calculate the MSE for simple regression model:
End of explanation
"""
# Training-set MSE of the degree-9 model (near zero: it interpolates the points).
mean_squared_error(y, y_lrp)
"""
Explanation: Let's calculate the MSE for multiple regression model:
End of explanation
"""
# Fresh draw of data from the same process to test generalization.
x_new = np.linspace(0., 2, 10)
# NOTE(review): y_new is computed from x, not x_new -- identical values here
# since both linspace calls match, but confirm the intent.
y_new = x + 0.5*np.random.randn(len(x_new))
y_lr_new = lr.predict(x_new[:, np.newaxis])
y_lrp_new = lrp.predict(np.vander(x_new, N=10, increasing=True))
"""
Explanation: The multiple regression model fits the data perfectly (MSE is almost 0). The predicted values are exactly the same as the observed values since the prediction curve goes through every point. However, the simple regression model captures the linear relation between x and y but does not predict the observed values perfectly. So, should we choose the multiple regression model rather than the simple regression model, since the former fits the data much better than the latter?
Q4: Which model do you think is the better model? Why?
Remember that we want to find a model that fits the data well and, most importantly, can predict well on some new data. Let's simulate some new data and see the prediction performance of each model on the new data.
End of explanation
"""
# Same fitted curves, now shown against the newly simulated points.
plt.figure(figsize=(8,6))
plt.scatter(x_new, y_new)
plt.plot(x, y_lr, 'g',label='Simple regression')
plt.title("Linear regression")
plt.plot(x_plot, y_plot,label='Multiple regression')
plt.legend(loc=2)
"""
Explanation: Let's plot the old models applied to the new data.
End of explanation
"""
# MSE of the simple model on the new data.
mean_squared_error(y_new, y_lr_new)
"""
Explanation: MSE for simple regression on new data:
End of explanation
"""
# MSE of the degree-9 model on the new data (typically much worse: overfitting).
mean_squared_error(y_new, y_lrp_new)
"""
Explanation: MSE for multiple regression on new data:
End of explanation
"""
# Let's read in the dataset and mark examples as tumor or normal depending on
# how they are annotated the sample description file (BRCA.547.PAM50.SigClust.Subtypes.txt)
tcga = read_dataset('../29_Data_ML-II/TCGA_dataset.pcl')
tcga_status = {} # hold tumor/normal status encoded as 1 (tumor)/2 (normal)
label_fh = open('BRCA.547.PAM50.SigClust.Subtypes.txt')
for line in label_fh:
    toks = line.strip().split()
    if toks[1] == 'tumor-adjacent normal':
        tcga_status[toks[0]] = 2
    else:
        # Everything that is not tumor-adjacent normal is treated as tumor.
        tcga_status[toks[0]] = 1
# Align labels with the sample order of the TCGA expression matrix.
tcga_status_list = []
for sample in tcga['samples']:
    tcga_status_list.append(tcga_status[sample])
# The first lines here are just the code from above copied down for convenience.
# Now we're going to construct a classifier. First we need to set up our parameters
svm_classifier = svm.SVC(C=0.000000001, kernel='linear')
# Once our parameters are set, we can fit the classifier to our data
svm_classifier.fit(bric['matrix'], status_list)
# Once we have our classifier, we can apply it back to the examples and get our score
# Since this is binary classification. We get an accuracy.
score = svm_classifier.score(bric['matrix'], status_list)
print("Training Accuracy: " + str(score))
# Ok - now let's apply our classifier from before to these data:
# (trained on METABRIC, evaluated on TCGA -- an independent test set)
tcga_score = svm_classifier.score(tcga['matrix'], tcga_status_list)
print("Testing Accuracy: " + str(tcga_score))
"""
Explanation: The multiple regression model will almost certainly perform worse than simple regression model on the new data (we don't know for sure in your case, because new data are simulated each time - check with your neighbors to see what they get as well, or feel free to clear and re-run the code to see another example). This is because the multiple regression model overfits the training data. It captures not only the true linear relation between x and y but also the random noise. However, simple regression only captures linear relation.
This also demonstrates that it is not a good idea to train and evaluate a model on the same data set. If so, we tend to choose the model that overfits the data. However, in real data analysis, you will occasionally see papers reporting nearly perfect model fitting results. If you look closely, you will find that the authors fit and evaluate the model on the same data set. You now know that this is a typical overfitting problem. In your future research, be careful with the overfitting problem when you try some machine learning models on your data!
To avoid overfitting, there are several methods. One is to use regularization in the model to reduce the model complexity. The other is to train the model on one dataset and evaluate the model on a separate dataset. For now, we'll cover evaluating on a separate dataset.
Homework: BRCA Tumor/Normal - Revisited!
We are lucky enough to have an independent validation dataset of breast cancers from The Cancer Genome Atlas (TCGA). Let's see how our classifier does here!
End of explanation
"""
|
Kaggle/learntools | notebooks/feature_engineering/raw/tut4.ipynb | apache-2.0 | #$HIDE_INPUT$
%matplotlib inline
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
# Kickstarter projects with parsed launch/deadline timestamps.
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv',
                 parse_dates=['deadline', 'launched'])
# Drop live projects
ks = ks.query('state != "live"')
# Add outcome column, "successful" == 1, others are 0
ks = ks.assign(outcome=(ks['state'] == 'successful').astype(int))
# Timestamp features
ks = ks.assign(hour=ks.launched.dt.hour,
               day=ks.launched.dt.day,
               month=ks.launched.dt.month,
               year=ks.launched.dt.year)
# Label encoding
cat_features = ['category', 'currency', 'country']
encoder = LabelEncoder()
encoded = ks[cat_features].apply(encoder.fit_transform)
data_cols = ['goal', 'hour', 'day', 'month', 'year', 'outcome']
baseline_data = ks[data_cols].join(encoded)
# Pairwise interaction features: label-encode each pair of categoricals
# joined as a single string (e.g. category_currency).
cat_features = ['category', 'currency', 'country']
interactions = pd.DataFrame(index=ks.index)
for col1, col2 in itertools.combinations(cat_features, 2):
    new_col_name = '_'.join([col1, col2])
    # Convert to strings and combine
    new_values = ks[col1].map(str) + "_" + ks[col2].map(str)
    label_enc = LabelEncoder()
    interactions[new_col_name] = label_enc.fit_transform(new_values)
baseline_data = baseline_data.join(interactions)
# Rolling count of projects launched in the preceding 7 days
# (minus 1 so the current project does not count itself).
launched = pd.Series(ks.index, index=ks.launched, name="count_7_days").sort_index()
count_7_days = launched.rolling('7d').count() - 1
count_7_days.index = launched.values
count_7_days = count_7_days.reindex(ks.index)
baseline_data = baseline_data.join(count_7_days)
def time_since_last_project(series):
    """Return hours elapsed since the previous timestamp in *series*.

    The first element has no predecessor, so it comes back as NaN.
    """
    gaps = series.diff()
    return gaps.dt.total_seconds() / 3600.0
# Per-category hours since the previous launch in the same category.
df = ks[['category', 'launched']].sort_values('launched')
timedeltas = df.groupby('category').transform(time_since_last_project)
# First launch in each category has no predecessor; fill with the max gap.
timedeltas = timedeltas.fillna(timedeltas.max())
baseline_data = baseline_data.join(timedeltas.rename({'launched': 'time_since_last_project'}, axis=1))
def get_data_splits(dataframe, valid_fraction=0.1):
    """Split rows into (train, valid, test) by position.

    The last 2 * valid_fraction of the rows are held out: the first half
    of the holdout is the validation set, the second half the test set;
    everything before is training data.

    Bug fix: the original body immediately reassigned
    ``valid_fraction = 0.1``, silently ignoring the caller's argument.
    Behavior with the default argument is unchanged.
    """
    valid_size = int(len(dataframe) * valid_fraction)
    train = dataframe[:-valid_size * 2]
    # valid size == test size, last two sections of the data
    valid = dataframe[-valid_size * 2:-valid_size]
    test = dataframe[-valid_size:]
    return train, valid, test
def train_model(train, valid):
    """Train a LightGBM binary classifier and print the validation AUC.

    Both dataframes must contain an 'outcome' label column; every other
    column is used as a feature. Returns the trained booster.
    """
    feature_cols = train.columns.drop('outcome')
    train_set = lgb.Dataset(train[feature_cols], label=train['outcome'])
    valid_set = lgb.Dataset(valid[feature_cols], label=valid['outcome'])
    params = {'num_leaves': 64, 'objective': 'binary',
              'metric': 'auc', 'seed': 7}
    print("Training model!")
    booster = lgb.train(params, train_set,
                        num_boost_round=1000,
                        valid_sets=[valid_set],
                        early_stopping_rounds=10,
                        verbose_eval=False)
    preds = booster.predict(valid[feature_cols])
    auc = metrics.roc_auc_score(valid['outcome'], preds)
    print(f"Validation AUC score: {auc:.4f}")
    return booster
"""
Explanation: Introduction
Often you'll have hundreds or thousands of features after various encodings and feature generation. This can lead to two problems. First, the more features you have, the more likely you are to overfit to the training and validation sets. This will cause your model to perform worse at generalizing to new data.
Secondly, the more features you have, the longer it will take to train your model and optimize hyperparameters. Also, when building user-facing products, you'll want to make inference as fast as possible. Using fewer features can speed up inference at the cost of predictive performance.
To help with these issues, you'll want to use feature selection techniques to keep the most informative features for your model.
We'll show that in this lesson.
End of explanation
"""
from sklearn.feature_selection import SelectKBest, f_classif
feature_cols = baseline_data.columns.drop('outcome')
# Keep 5 features
selector = SelectKBest(f_classif, k=5)
# NOTE: fit on ALL rows (including future valid/test rows) -- this leaks
# information; the corrected train-only version appears in the next cell.
X_new = selector.fit_transform(baseline_data[feature_cols], baseline_data['outcome'])
X_new
"""
Explanation: Univariate Feature Selection
The simplest and fastest methods are based on univariate statistical tests. For each feature, measure how strongly the target depends on the feature using a statistical test like $\chi^2$ or ANOVA.
From the scikit-learn feature selection module, feature_selection.SelectKBest returns the K best features given some scoring function. For our classification problem, the module provides three different scoring functions: $\chi^2$, ANOVA F-value, and the mutual information score. The F-value measures the linear dependency between the feature variable and the target. This means the score might underestimate the relation between a feature and the target if the relationship is nonlinear. The mutual information score is nonparametric and so can capture nonlinear relationships.
With SelectKBest, we define the number of features to keep, based on the score from the scoring function. Using .fit_transform(features, target) we get back an array with only the selected features.
End of explanation
"""
feature_cols = baseline_data.columns.drop('outcome')
train, valid, _ = get_data_splits(baseline_data)
# Keep 5 features
selector = SelectKBest(f_classif, k=5)
# Fit on the training split only to avoid leakage from valid/test rows.
X_new = selector.fit_transform(train[feature_cols], train['outcome'])
X_new
"""
Explanation: However, I've done something wrong here. The statistical tests are calculated using all of the data. This means information from the validation and test sets could influence the features we keep, introducing a source of leakage. This means we should select features using only a training set.
End of explanation
"""
# Get back the features we've kept, zero out all other features
# (inverse_transform restores the original column layout with rejected
# columns filled with zeros).
selected_features = pd.DataFrame(selector.inverse_transform(X_new),
                                 index=train.index,
                                 columns=feature_cols)
selected_features.head()
"""
Explanation: You should notice that the selected features are different than when I used the entire dataset. Now we have our selected features, but it's only the feature values for the training set. To drop the rejected features from the validation and test sets, we need to figure out which columns in the dataset were kept with SelectKBest. To do this, we can use .inverse_transform to get back an array with the shape of the original data.
End of explanation
"""
# Dropped columns have values of all 0s, so var is 0, drop them
selected_columns = selected_features.columns[selected_features.var() != 0]
# Get the valid dataset with the selected features.
valid[selected_columns].head()
"""
Explanation: This returns a DataFrame with the same index and columns as the training set, but all the dropped columns are filled with zeros. We can find the selected columns by choosing features where the variance is non-zero.
End of explanation
"""
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
train, valid, _ = get_data_splits(baseline_data)
X, y = train[train.columns.drop("outcome")], train['outcome']
# Set the regularization parameter C=1
# (L1 penalty drives some coefficients to exactly zero; liblinear supports L1.)
logistic = LogisticRegression(C=1, penalty="l1", solver='liblinear', random_state=7).fit(X, y)
# prefit=True: reuse the already-fitted model for selection.
model = SelectFromModel(logistic, prefit=True)
X_new = model.transform(X)
X_new
"""
Explanation: L1 regularization
Univariate methods consider only one feature at a time when making a selection decision. Instead, we can make our selection using all of the features by including them in a linear model with L1 regularization. This type of regularization (sometimes called Lasso) penalizes the absolute magnitude of the coefficients, as compared to L2 (Ridge) regression which penalizes the square of the coefficients.
As the strength of regularization is increased, features which are less important for predicting the target are set to 0. This allows us to perform feature selection by adjusting the regularization parameter. We choose the parameter by finding the best performance on a hold-out set, or decide ahead of time how many features to keep.
For regression problems you can use sklearn.linear_model.Lasso, or sklearn.linear_model.LogisticRegression for classification. These can be used along with sklearn.feature_selection.SelectFromModel to select the non-zero coefficients. Otherwise, the code is similar to the univariate tests.
End of explanation
"""
# Get back the kept features as a DataFrame with dropped columns as all 0s
selected_features = pd.DataFrame(model.inverse_transform(X_new),
                                 index=X.index,
                                 columns=X.columns)
# Dropped columns have values of all 0s, keep other columns
selected_columns = selected_features.columns[selected_features.var() != 0]
"""
Explanation: Similar to the univariate tests, we get back an array with the selected features. Again, we will want to convert these to a DataFrame so we can get the selected columns.
End of explanation
"""
|
mayank-johri/LearnSeleniumUsingPython | Section 1 - Core Python/Chapter 02 - Basics/2.2. Python Identifiers.ipynb | gpl-3.0 | current_month = "MAY"
print(current_month)
"""
Explanation: Python Identifiers aka Variables
In Python, variable names are kind of tags/pointers to the memory location which hosts the data. We can also think of it as a labeled container that can store a single value. That single value can be of practically any data type.
Storing Values in Variables:
In Python, the declaration & assignation of value to the variable are done at the same time, i.e. as soon as we assign a value to a non-existing or existing variable, the required memory location is assigned to it and proper data is populated in it.
NOTE: Storing Values in Python is one of the most important concepts and should be understood with great care.
End of explanation
"""
# Variables are created at first assignment; no prior declaration is needed.
current_month = "MAY" # A comment.
date = 10
"""
Explanation: In the above example, current_month is the variable name and "MAY" is the value associated with it. Operation performed in the first line is called assignment and such statements are called assignment statements. Lets discuss them in details.
Assignment Statements
You’ll store values in variables with an assignment statement. An assignment statement consists of a variable name, an equal sign (called the assignment operator), and the value to be stored. If you enter the assignment statement current_month = "MAY", then a variable named current_month will be pointing to a memory location which has the string value "MAY" stored in it.
In Python, we do not need to declare variable explicitly. They are declared automatically when any value is assigned. The assignment is done using the equal (=) operator as shown in the below example:
End of explanation
"""
# Rebinding: current_month now refers to a new string object, "JUNE".
current_month = "JUNE"
"""
Explanation: The pictorial representation of variables from above example.
<img src="files/variables.png">
Now lets perform some actions on the variable current_month and observe the changes happening on it.
In the example shown below, we will reassign a new value JUNE to the variable current_month and observe the effects of it.
The image below shows the process of reassignment. You will note that new memory is assigned to the variable instead of reusing the existing allocation.
End of explanation
"""
# Compare object identities of equal and unequal string values.
# NOTE(review): equal string literals may share one object (CPython
# interning), so the first two ids can match -- confirm on your interpreter.
current_month = "JUNE"
print(id(current_month))
next_month = "JUNE"
print(id(next_month))
next_month = "June"
print(id(next_month))
"""
Explanation: current_month was initially pointing to memory location containing value MAY and after reassination, it was pointing to a new memory location containing value JUNE and if no other referencing the previous value, then automatically Python GC will clean it at some future time.
End of explanation
"""
########## Reference count ###################
# NOTE: Please test the below code by saving
# it as a file and executing it instead
# of running it here.
#############################################
import sys
new_var = 10101010101000
# getrefcount reports at least one extra reference (its own argument),
# so the printed count is higher than you might expect.
print(sys.getrefcount(new_var))
"""
Explanation: Note: The value MAY itself was not updated; instead, new memory was allocated for the value JUNE and the variable now points to it.
Later in the chapter, we will illustrate the above scenario with more examples.
How to find the reference count of a value
End of explanation
"""
# Chained assignment: x, y and z all name the same int object 1000.
x=y=z=1000
print(x, y, z)
"""
Explanation: NOTE:
The value of refcount will almost always be more than you think. It is done internally by python to optimize the code. I will be adding more details about it in "Section 2 -> Chapter: GC & Cleanup"
Multiple Assignment:
In multiple assignment, multiple variables are assigned values in a single line. There are two ways multiple assignment can be done in python. In first format all the variables point to the same value and in next all variables point to individual values.
1. Assigning single value to multiple variables:
End of explanation
"""
# All three names point at the same object, so all three ids are equal.
print(id(x))
print(id(y))
print(id(z))
"""
Explanation: In the above example, all x, y and z are pointing to same memory location which contains 1000, which we are able to identify by checking the id of the variables. They are pointing to the same memory location, thus value of id for all three are same.
End of explanation
"""
# Rebinding x does not touch y or z: only x now points at a new object.
x = 200
print(x)
print(y)
print(z)
print(id(x))  # changed: x was rebound to 200
print(id(y))  # unchanged
print(id(z))  # unchanged
"""
Explanation: Now, let's change the value of one variable and check the respective ids again.
End of explanation
"""
### INTEGER
# Three separate literals with the same value; CPython may or may not
# reuse one object for them (an implementation detail).
x=1000
y=1000
z=1000
print(x)
print(y)
print(z)
print(id(x))
print(id(y))
print(id(z))
### String
# x and z are the string "1000", y is the int 1000 — different types,
# so y is necessarily a distinct object.
x="1000"
y=1000
z="1000"
print(x)
print(y)
print(z)
print(id(x))
print(id(y))
print(id(z))
"""
Explanation: Now, lets test something else. Can different data types impact the behavior of python memory optimization. We will first test it with integer, string and then with list.
End of explanation
"""
### list
# Lists are mutable, so every list literal creates a brand-new object:
# all four ids below are distinct even when the contents are equal.
x = ["1000"]
y = [1000]
z = ["1000"]
a = [1000]
print(x)
print(y)
print(z)
print(a)
print(id(x))
print(id(y))
print(id(z))
print(id(a))
"""
Explanation: Check the ids of x and z: they are the same, while the id of y is different.
End of explanation
"""
# Tuple unpacking: each name gets its own value in one statement.
x, y, z = 10, 20, 30
print(x)
print(y)
print(z)
print(id(x))
print(id(y))
print(id(z))
# x and z receive the same value 10, so they share one object.
x, y, z = 10, 120, 10
print(x)
print(y)
print(z)
print(id(x))
print(id(y))
print(id(z))
"""
Explanation: 2. Assigning multiple values to multiple variables:
End of explanation
"""
# Examples of variable names of varying quality for the same data:
# descriptive (prime_minister), abbreviated (pm_name), overly long
# (corrent_name_of_cong_president — note it even contains a typo) and
# too short to be meaningful (cname).
pm_name = "Narendra Modi"
prime_minister = "Narendra Modi"
cong_p_name = "Rahul Gandhi"
corrent_name_of_cong_president = "Rahul Gandhi"
cong_president = "Rahul Gandhi"
cname = "RG"
"""
Explanation: Variable Names & Naming Conventions
There are a couple of naming conventions in use in Python:
- lower_with_underscores: Uses only lower case letters and connects multiple words with underscores.
- UPPER_WITH_UNDERSCORES: Uses only upper case letters and connects multiple words with underscores.
- CapitalWords: Capitalize the beginning of each letter in a word; no underscores. With these conventions in mind, here are the naming conventions in use.
Variable Names: lower_with_underscores
Constants: UPPER_WITH_UNDERSCORES
Function Names: lower_with_underscores
Function Parameters: lower_with_underscores
Class Names: CapitalWords
Method Names: lower_with_underscores
Method Parameters and Variables: lower_with_underscores
Always use self as the first parameter to a method
To indicate privacy, precede name with a single underscore.
End of explanation
"""
# Quiz cell: these bare names are intentionally undefined, and `1This`
# is not a legal identifier, so running this cell raises a SyntaxError.
# It exists only so the reader can judge which names are valid.
this_is_my_number
THIS_IS_MY_NUMBER
ThisIsMyNumber
this_is_number
anotherVarible
This1
this1home
1This
__sd__
_sd
"""
Explanation: Options can be used to override the default regular expression associated to each type. The table below lists the types, their associated options, and their default regular expressions.
| Type | Default Expression |
|:-----------------:|:-----------------------------------------:|
| Argument | [a-z_][a-z0-9_] |
| Attribute | [a-z_][a-z0-9_] |
| Class | [A-Z_][a-zA-Z0-9] |
| Constant | (([A-Z_][A-Z0-9_] |
| Function | [a-z_][a-z0-9_] |
| Method | [a-z_][a-z0-9_] |
| Module | (([a-z_][a-z0-9_]), ([A-Z][a-zA-Z0-9])) |
| Variable | [a-z_][a-z0-9_] |
| Variable, inline1 | [A-Za-z_][A-Za-z0-9_] |
Find the invalid variable names in the list below:
End of explanation
"""
_ is used
* To use as ‘Internationalization(i18n)’ or ‘Localization(l10n)’ functions.
"""
Explanation: Good Variable Name
Choose meaningful name instead of short name. roll_no is better than rn.
Maintain the length of a variable name. Roll_no_of_a_student is too long?
Be consistent; roll_no or RollNo
Begin a variable name with an underscore(_) character for a special case.
Exercises
Q 1. Find the valid and in-valid variable names from the followings:
balance
current-balance
current balance
current_balance
4account
_spam
42
SPAM
total_$um
account4
'hello'
Q 2. Multiple Choice Questions & Answers
Is Python case sensitive when dealing with identifiers?
a) yes
b) no
c) machine dependent
d) none of the mentioned
What is the maximum possible length of an identifier?
a) 31 characters
b) 63 characters
c) 79 characters
d) none of the mentioned
What does local variable names beginning with an underscore mean?
a) they are used to indicate a private variables of a class
b) they confuse the interpreter
c) they are used to indicate global variables
d) None of the
Which of the following is true for variable names in Python?
a) unlimited length
b) Only _ and $ special characters allowed in variable name
c) private members should have leading & trailing underscores
d) None of the above
End of explanation
"""
|
sdpython/ensae_teaching_cs | _doc/notebooks/td1a_home/2020_ordonnancement.ipynb | mit | from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
"""
Explanation: Algo - Problème d'ordonnancement
Un problème d'ordonnancement est un problème dans lequel il faut déterminer le meilleur moment de démarrer un travail, une tâche alors que celles-ci ont des durées bien précises et dépendent les unes des autres.
End of explanation
"""
import numpy
import matplotlib.pyplot as plt
from jyquickhelper import RenderJsDot
def plot_network(mat):
    """Render the adjacency matrix *mat* as a directed graph.

    The graph is expressed in the Graphviz DOT language
    (https://graphviz.org/doc/info/lang.html) and rendered with
    RenderJsDot.  An edge i -> j is drawn for every strictly positive
    entry mat[i, j].
    """
    lines = ["digraph{ ", ' rankdir="LR";', ' size="4,4";']
    # Declare one node per index.
    for node in range(max(mat.shape)):
        lines.append(" %d;" % node)
    # One edge per positive matrix entry.
    n_rows, n_cols = mat.shape
    for src in range(n_rows):
        for dst in range(n_cols):
            if mat[src, dst] > 0:
                lines.append(" %d -> %d;" % (src, dst))
    lines.append("}")
    dot = "\n".join(lines)
    return RenderJsDot(dot)
# Dependency matrix: mat[i, j] == 1 means task j depends on task i.
# Here tasks 1, 2, 3 depend on 0; task 2 also depends on 1; task 4
# depends on 1, 2 and 3.
mat = numpy.array([[0, 1, 1, 1, 0],
                   [0, 0, 1, 0, 1],
                   [0, 0, 0, 0, 1],
                   [0, 0, 0, 0, 1],
                   [0, 0, 0, 0, 0]])
plot_network(mat)
"""
Explanation: Enoncé
On définit un problème d'ordonnancement un peu plus simple dans lequel toutes les tâches ont la même durée qu'on représente par une matrice d'adjacence non symétrique.
End of explanation
"""
def order_same_weight(mat):
    """Compute the earliest start time of every unit-duration task.

    Parameters
    ----------
    mat : numpy.ndarray
        Square adjacency matrix of the dependency graph;
        mat[i, j] > 0 means task j depends on task i.

    Returns
    -------
    list of int
        fin[j] is the earliest time task j can start.  Source tasks
        (no dependencies) start at 0; tasks unreachable from any
        source keep the sentinel -1.

    Raises
    ------
    ValueError
        If a cycle is reachable from a source task (the previous
        implementation looped forever in that situation).
    """
    n = mat.shape[0]
    # -1 means "start time not determined yet".
    fin = [-1 for i in range(n)]
    for j in range(mat.shape[1]):
        if mat[:, j].sum() == 0:
            # Task j depends on no other task: it can start at time 0.
            fin[j] = 0
    # Relax fin[j] >= fin[i] + 1 for every edge i -> j until a fixed
    # point is reached.  A DAG with n nodes converges in at most n
    # passes, so still updating after n + 1 passes means a cycle.
    for _ in range(n + 1):
        update = False
        for i in range(mat.shape[0]):
            for j in range(mat.shape[1]):
                if mat[i, j] == 0 or fin[i] == -1:
                    continue
                # Task j depends on task i, which takes one time unit.
                if fin[j] < fin[i] + 1:
                    update = True
                    fin[j] = fin[i] + 1
        if not update:
            return fin
    raise ValueError("dependency graph contains a cycle")
order_same_weight(mat)  # expected: [0, 1, 2, 1, 3]
"""
Explanation: Le graphe se lit comme suit : pour faire la tâche 2, il faut faire la tâche 0 et 1 d'abord.
Q1 : écrire un algorithme qui détermine dans quel ordre on peut exécuter les tâches.
Il peut y avoir plusieurs tâches en parallèle. Quelle forme pourrait prendre le résultat ?
Q2 : Et si les tâches n'ont plus la même durée ?
Ne pourrait-on pas réutiliser ce qu'on a fait avec une petite astuce...
Réponses
Q1
Comment représenter le résultat ? Une idée consiste à créer un tableau fin $F_{i}$ où i est la tâche. $F_{i}=t$ signifie qu'au temps t, la tâche i est finie.
End of explanation
"""
# A harder graph: task 5 is added and task 0 now depends on it
# (edge 5 -> 0 in the last row), shifting every start time by one.
mat2 = numpy.array([[0, 1, 1, 1, 0, 0],
                    [0, 0, 1, 0, 1, 0],
                    [0, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 0, 0],
                    [1, 0, 0, 0, 0, 0]])
plot_network(mat2)
order_same_weight(mat2)
"""
Explanation: On vérifie sur un graphe plus compliqué.
End of explanation
"""
def order_any_weight(mat, durations):
    """Compute the earliest start time of every task with given durations.

    Parameters
    ----------
    mat : numpy.ndarray
        Square adjacency matrix; mat[i, j] > 0 means task j depends on
        task i.
    durations : sequence of int
        durations[i] is the (integer) duration of task i.

    Returns
    -------
    list of int
        fin[j] is the earliest time task j can start.  Source tasks
        start at 0; tasks unreachable from any source keep -1.

    Raises
    ------
    ValueError
        If a cycle is reachable from a source task (the previous
        implementation looped forever in that situation).
    """
    n = mat.shape[0]
    # -1 means "start time not determined yet".
    fin = [-1 for i in range(n)]
    for j in range(mat.shape[1]):
        if mat[:, j].sum() == 0:
            # Task j depends on no other task: it can start at time 0.
            fin[j] = 0
    # Same fixed-point relaxation as order_same_weight, except that an
    # edge i -> j now pushes task j after fin[i] + durations[i].
    for _ in range(n + 1):
        update = False
        for i in range(mat.shape[0]):
            for j in range(mat.shape[1]):
                if mat[i, j] == 0 or fin[i] == -1:
                    continue
                new_end = fin[i] + durations[i]
                if fin[j] < new_end:
                    update = True
                    fin[j] = new_end
        if not update:
            return fin
    raise ValueError("dependency graph contains a cycle")
order_any_weight(mat, durations=[1, 1, 1, 1, 1])  # all unit durations: same as order_same_weight
order_any_weight(mat, durations=[1, 2, 1, 1, 1])  # task 1 lasts 2 units, delaying tasks 2 and 4
"""
Explanation: Q2
Une astuce... Une tâche deux fois plus longue, c'est comme si on avait deux tâches, la seconde dépend uniquement de la première ou alors simple tenir compte de la durée lorsqu'on calcule le maximum. Voir la ligne ########### ligne changée.
End of explanation
"""
|
dwhswenson/openmm-mmst | examples/tully_model_1.ipynb | lgpl-2.1 | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as unit
# One single-particle OpenMM System per element of the 2x2 diabatic
# Hamiltonian: sys11/sys22 carry the diagonal potentials, sys12 the
# off-diagonal coupling, and sys00 a zero placeholder.
sys11 = mm.openmm.System()
sys12 = mm.openmm.System()
sys22 = mm.openmm.System()
sys00 = mm.openmm.System()
# Every system holds the same single particle (mass in atomic mass units).
# Loop variable renamed from `sys` to avoid shadowing the sys module.
for system in [sys11, sys12, sys22, sys00]:
    mass = 1980.0 * unit.amu
    system.addParticle(mass)
# V11(x) = V0 * tanh(a*x)
V11 = mm.openmm.CustomExternalForce("V0*tanh(a*x)")
V11.addGlobalParameter("V0", 0.01)
V11.addGlobalParameter("a", 1.6)
V11.addParticle(0, ())
# V22(x) = -V11(x)
V22 = mm.openmm.CustomExternalForce("-V0*tanh(a*x)")
V22.addGlobalParameter("V0", 0.01)
V22.addGlobalParameter("a", 1.6)
V22.addParticle(0, ())
# V12(x) = C * exp(-D*(x+f)^2) — Gaussian coupling as defined in the
# markdown above.  The previous expression was missing the square.
V12 = mm.openmm.CustomExternalForce("C*exp(-D*(x+f)^2)")
V12.addGlobalParameter("C", 0.005)
V12.addGlobalParameter("D", 1.0)
V12.addGlobalParameter("f", 0.0)
V12.addParticle(0, ())
# Zero potential for the placeholder system.
V00 = mm.openmm.CustomExternalForce("0.0*x")
V00.addParticle(0, ())
sys00.addForce(V00)
sys11.addForce(V11)
sys12.addForce(V12)
sys22.addForce(V22)
topology = app.Topology()
# Time step (femtoseconds) and initial conditions for the 1-D particle;
# only the x component is used.
dt = 5*46.0 * unit.femtoseconds
init_pos = np.array([[-5.0, 0.0, 0.0]]) #* unit.nanometer
#init_vel = np.array([[19.9/1980.0, 0.0, 0.0]]) #* unit.nanometer / unit.picosecond
init_vel = np.array([[0.0022, 0.0, 0.0]])
"""
Explanation: Tully model 1 from JCP 93, 1061 (1990). Details of this usage of the MMST matches that of Ananth et al. (JCP 127, 084114 (2007)).
The potentials are:
$V_{11}(R) = V_0 \tanh(a R) \
V_{22}(R) = -V_{11}(R) \
V_{12}(R) = V_{21}(R) = C e^{-D(R+f)^2}$
with $V_0 = 0.01$, $a=1.6$, $C=0.005$, $D=1$, and $f=0$.
End of explanation
"""
# Integrate on the single diabatic surface V11 with a Verlet integrator.
integ = mm.VerletIntegrator(dt)
simulation = app.Simulation(topology, sys11, integ)
simulation.context.setPositions(init_pos)
simulation.context.setVelocities(init_vel)
# Record the x position, x force and potential energy at every step
# (values are stripped of their OpenMM units before storing).
traj = []
forces = []
energies = []
for i in range(40000):
    state = simulation.context.getState(getPositions=True,
                                        getForces=True, getEnergy=True
                                        )
    pos = state.getPositions(asNumpy=True)
    force = state.getForces(asNumpy=True)
    energy = state.getPotentialEnergy()
    forces.append(force[0][0] / force.unit)
    energies.append(energy / energy.unit)
    traj.append(pos[0][0] / pos.unit)
    simulation.step(1)
# Force and potential energy as functions of position, then the
# trajectory itself against step index.
plt.plot(traj, forces)
plt.plot(traj, energies)
plt.plot(traj)
"""
Explanation: Running a trajectory using just a single surface
End of explanation
"""
import openmm_mmst as mmst
# Diabatic Hamiltonian with zero coupling (sys00 off-diagonal): should
# reproduce the single-surface result.
Hmat = [[sys11, sys00], [sys00, sys22]]
#mmst_integ = NonadiabaticIntegrator(Hmat)
"""
Explanation: Running a trajectory using MMST with no coupling
This should give the same results as the single surface.
End of explanation
"""
# Full MMST Hamiltonian including the off-diagonal coupling sys12.
Hmat = [[sys11, sys12], [sys12, sys22]]
#mmst_integ = NonadiabaticIntegrator(Hmat)
f = simulation.system.getForce(0)  # first force of the current simulation's system
"""
Explanation: Running a trajectory with the real MMST
End of explanation
"""
|
zingale/pyreaclib | modify-example.ipynb | bsd-3-clause | import pynucastro as pyna
reaclib_library = pyna.ReacLibLibrary()
"""
Explanation: Modifying Rates
Sometimes we want to change the nuclei involved in rates to simplify our network. Currently,
pynucastro supports changing the products. Here's an example.
End of explanation
"""
# Select every rate whose reactants are exactly c12 + c12.
# NOTE(review): the name `filter` shadows the builtin of the same name.
filter = pyna.RateFilter(reactants=["c12", "c12"])
mylib = reaclib_library.filter(filter)
mylib
"""
Explanation: We want to model ${}^{12}\mathrm{C} + {}^{12}\mathrm{C}$ reactions. There are 3 rates involved.
End of explanation
"""
# Pick out the 12C(12C,n)23Mg rate so its products can be modified below.
r = mylib.get_rate("c12 + c12 --> n + mg23 <cf88_reaclib__reverse>")
r
"""
Explanation: The rate ${}^{12}\mathrm{C}({}^{12}\mathrm{C},n){}^{23}\mathrm{Mg}$ is quickly followed by ${}^{23}\mathrm{Mg}(n,\gamma){}^{24}\mathrm{Mg}$, so we want to modify that rate sequence to just be ${}^{12}\mathrm{C}({}^{12}\mathrm{C},\gamma){}^{24}\mathrm{Mg}$
End of explanation
"""
r.Q  # Q value of the rate before modifying its products
"""
Explanation: This has the Q value:
End of explanation
"""
# Replace the n + mg23 products with mg24, collapsing the two-step
# sequence into 12C(12C,gamma)24Mg.
r.modify_products("mg24")
r
"""
Explanation: Now we modify it
End of explanation
"""
r.Q  # Q value recomputed for the new mg24 endpoint
"""
Explanation: and we see that the Q value has been updated to reflect the new endpoint
End of explanation
"""
# Library of all rates linking these nuclei (mg23 deliberately omitted).
mylib2 = reaclib_library.linking_nuclei(["p", "he4", "c12", "o16", "ne20", "na23", "mg24"])
"""
Explanation: Now let's build a network that includes the nuclei involved in our carbon burning. We'll start by leaving off the ${}^{23}\mathrm{Mg}$
End of explanation
"""
# Add the modified 12C+12C -> 24Mg rate, then build and plot the network.
mylib2 += pyna.Library(rates=[r])
mylib2
rc = pyna.RateCollection(libraries=[mylib2])
rc.plot(rotated=True, curved_edges=True, hide_xalpha=True)
"""
Explanation: Now we add in our modified rate
End of explanation
"""
|
ohadravid/ml-tutorial | notebooks/402-ClusteringTextFromWiki2.ipynb | mit | df = pd.read_csv('../data/wiki/wiki.csv.gz', encoding='utf8', index_col=None)
df['text'] = df.text.str[:3000]
totalvocab_stemmed = []
totalvocab_tokenized = []
for doc_text in df.text:
allwords_stemmed = tokenize_and_stem(doc_text) #for each item in 'synopses', tokenize/stem
totalvocab_stemmed.extend(allwords_stemmed) #extend the 'totalvocab_stemmed' list
allwords_tokenized = tokenize_only(doc_text)
totalvocab_tokenized.extend(allwords_tokenized)
vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_stemmed)
vocab_frame.head(10)
len(vocab_frame)
"""
Explanation: Below I use my stemming/tokenizing and tokenizing functions to iterate over the list of synopses to create two vocabularies: one stemmed and one only tokenized.
End of explanation
"""
from sklearn.feature_extraction.text import TfidfVectorizer
# Tf-idf over 1-3 grams; very frequent (>1% of docs) and very rare
# (<3 docs) terms are dropped.
tfidf_vectorizer = TfidfVectorizer(max_df=0.01, min_df=3,
                                 stop_words='english',
                                 tokenizer=tokenize_and_stem, ngram_range=(1,3))
%time tfidf_matrix = tfidf_vectorizer.fit_transform(df.text)
print(tfidf_matrix.shape)
terms = tfidf_vectorizer.get_feature_names()
len(vocab_frame)
# NOTE(review): duplicate of the assignment two lines up.
terms = tfidf_vectorizer.get_feature_names()
idx = 1000
terms[idx]
terms[2001]
# .ix is deprecated in modern pandas; .loc is the replacement.
vocab_frame.ix[terms[idx]].head(5)
"""
Explanation: Tf-idf and document similarity
End of explanation
"""
from sklearn.cluster import KMeans
# Cluster the tf-idf document vectors into 30 groups.
num_clusters =30
km = KMeans(n_clusters=num_clusters)
%time km.fit(tfidf_matrix)
clusters = km.labels_.tolist()
len(clusters)
"""
Explanation: Lets cluster!
End of explanation
"""
# Attach each document's cluster label to the original dataframe.
clustered = df.join(pd.DataFrame({'cluster': clusters}))
clustered.head()
km.cluster_centers_
# argsort is ascending, so the last entries of each row are the
# highest-weighted terms of that cluster centroid.
order_centroids = km.cluster_centers_.argsort()
order_centroids
term_words = vocab_frame.ix[terms[idx]]
term_words['words'].value_counts().keys()[0]
# Name each cluster after the most common surface form of its six
# top-weighted (stemmed) terms.
cluster_names = []
for cluster_centeroids in order_centroids:
    words = []
    for ind in cluster_centeroids[-6:]:
        term_words = vocab_frame.ix[terms[ind].split(' ')]
        best_word = term_words['words'].value_counts().keys()[0]
        words.append(best_word)
    cluster_names.append(', '.join(words))
cluster_names
clustered['cluster_name'] =clustered.cluster.map(lambda cluster: cluster_names[cluster])
clustered.head(60)
from sklearn.metrics.pairwise import cosine_similarity
# Pairwise document distance = 1 - cosine similarity, used by MDS below.
dist = 1 - cosine_similarity(tfidf_matrix)
import os # for os.path.basename
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.manifold import MDS
MDS()
# two components as we're plotting points in a two-dimensional plane
# "precomputed" because we provide a distance matrix
# we will also specify `random_state` so the plot is reproducible.
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
pos = mds.fit_transform(dist)  # shape (n_components, n_samples)
xs, ys = pos[:, 0], pos[:, 1]
import seaborn as sns
#set up colors per clusters using a dict
cluster_colors = sns.color_palette(n_colors=len(clusters))
%matplotlib inline
#create data frame that has the result of the MDS plus the cluster numbers and titles
# NOTE(review): this rebinds `df`, clobbering the original articles dataframe.
df = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=df.name))
#group by cluster
groups = df.groupby('label')
# set up plot
fig, ax = plt.subplots(figsize=(30, 30)) # set size
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
#iterate through groups to layer the plot
#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label
for name, group in groups:
    ax.plot(group.x, group.y, marker='o', linestyle='', ms=12, label=cluster_names[name], color=cluster_colors[name], mec='none')
    ax.set_aspect('auto')
    ax.tick_params(\
        axis= 'x',         # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom='off',      # ticks along the bottom edge are off
        top='off',         # ticks along the top edge are off
        labelbottom='off')
    ax.tick_params(\
        axis= 'y',         # changes apply to the y-axis
        which='both',      # both major and minor ticks are affected
        left='off',        # ticks along the bottom edge are off
        top='off',         # ticks along the top edge are off
        labelleft='off')
ax.legend(numpoints=1)  #show legend with only 1 point
#add label in x,y position with the label as the film title
for i in range(len(df)):
    ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=8)
#plt.show() #show the plot
#uncomment the below to save the plot if need be
plt.savefig('clusters_small_noaxes1.png', dpi=300)
"""
Explanation: But what did we get?
End of explanation
"""
|
google/CFU-Playground | proj/fccm_tutorial/Amaranth_for_CFUs.ipynb | apache-2.0 | # Install Amaranth
!pip install --upgrade 'amaranth[builtin-yosys]'
# CFU-Playground library
!git clone https://github.com/google/CFU-Playground.git
import sys
sys.path.append('CFU-Playground/python')
# Imports
from amaranth import *
from amaranth.back import verilog
from amaranth.sim import Delay, Simulator, Tick
from amaranth_cfu import TestBase, SimpleElaboratable, pack_vals, simple_cfu, InstructionBase, CfuTestBase
import re, unittest
# Utility to convert Amaranth to verilog
def convert_elaboratable(elaboratable):
    """Convert an Amaranth elaboratable to cleaned-up Verilog text.

    The module is emitted under the name 'Top'.  Yosys attribute
    annotations ``(* ... *)`` and whitespace-only lines are stripped
    to keep the output readable.
    """
    source = verilog.convert(elaboratable, name='Top', ports=elaboratable.ports)
    source = re.sub(r'\(\*.*\*\)', '', source)
    return re.sub(r'^ *\n', '', source, flags=re.MULTILINE)
def runTests(klazz):
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(klazz))
runner = unittest.TextTestRunner()
runner.run(suite)
"""
Explanation: <a href="https://colab.research.google.com/github/alanvgreen/CFU-Playground/blob/fccm2/proj/fccm_tutorial/Amaranth_for_CFUs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Amaranth for CFUs
Copyright 2022 Google LLC.
SPDX-License-Identifier: Apache-2.0
This page shows
Incremental building of an Amaranth CFU
Simple examples of Amaranth's language features.
Also see:
https://github.com/amaranth-lang/amaranth
Docs: https://amaranth-lang.org/docs/amaranth/latest/
avg@google.com / 2022-04-19
This next cell initialises the libraries and Python path. Execute it before any others.
End of explanation
"""
class SingleMultiply(SimpleElaboratable):
    """Combinational multiplier: result = (a + 128) * b.

    a and b are signed 8-bit inputs; the +128 offset is applied to `a`
    before the signed multiplication (matching the -128 offset used
    when packing test values).
    """

    def __init__(self):
        self.a = Signal(signed(8))
        self.b = Signal(signed(8))
        self.result = Signal(signed(32))

    def elab(self, m):
        product = (self.a + 128) * self.b
        m.d.comb += self.result.eq(product)
class SingleMultiplyTest(TestBase):
    """Simulation test for SingleMultiply."""
    def create_dut(self):
        return SingleMultiply()
    def test(self):
        # Each row is (a, b, expected (a+128)*b); the -128 in `a`
        # cancels the +128 applied inside the DUT.
        TEST_CASE = [
            (1-128, 1, 1),
            (33-128, -25, 33*-25),
        ]
        def process():
            for (a, b, expected) in TEST_CASE:
                yield self.dut.a.eq(a)
                yield self.dut.b.eq(b)
                # Let the combinational logic settle before checking.
                yield Delay(0.1)
                self.assertEqual(expected, (yield self.dut.result))
                yield
        self.run_sim(process)
runTests(SingleMultiplyTest)
Explanation: Four-way Multiply-Accumulate
These cells demonstrate the evolution of a full four-way multiply-accumulate CFU instruction.
SingleMultiply
Demonstrates a simple calculation: (a+128)*b
End of explanation
"""
class WordMultiplyAdd(SimpleElaboratable):
    """Four-way SIMD multiply-add over two packed 32-bit words.

    Each word is interpreted as four signed byte lanes; the result is
    the sum of (a_i + 128) * b_i over the four lanes.
    """

    def __init__(self):
        self.a_word = Signal(32)
        self.b_word = Signal(32)
        self.result = Signal(signed(32))

    def elab(self, m):
        def lanes(word):
            # Split a 32-bit word into its four signed byte lanes.
            return [word[i:i + 8].as_signed() for i in range(0, 32, 8)]

        products = [(a + 128) * b
                    for a, b in zip(lanes(self.a_word), lanes(self.b_word))]
        m.d.comb += self.result.eq(sum(products))
class WordMultiplyAddTest(TestBase):
    """Simulation test for WordMultiplyAdd."""
    def create_dut(self):
        return WordMultiplyAdd()
    def test(self):
        # Helpers pack four byte values into one 32-bit word; the -128
        # offset in `a` cancels the +128 applied inside the DUT.
        def a(a, b, c, d): return pack_vals(a, b, c, d, offset=-128)
        def b(a, b, c, d): return pack_vals(a, b, c, d, offset=0)
        TEST_CASE = [
            (a(99, 22, 2, 1), b(-2, 6, 7, 111), 59),
            (a(63, 161, 15, 0), b(29, 13, 62, -38), 4850),
        ]
        def process():
            for (a, b, expected) in TEST_CASE:
                yield self.dut.a_word.eq(a)
                yield self.dut.b_word.eq(b)
                # Let the combinational logic settle before checking.
                yield Delay(0.1)
                self.assertEqual(expected, (yield self.dut.result))
                yield
        self.run_sim(process)
runTests(WordMultiplyAddTest)
Explanation: WordMultiplyAdd
Performs four (a + 128) * b operations in parallel, and adds the results.
End of explanation
"""
class WordMultiplyAccumulate(SimpleElaboratable):
    """Four-way multiply-add feeding a 32-bit signed accumulator.

    On each clock edge, if `enable` is high the sum of the four byte-lane
    products (a_i + 128) * b_i is added to `accumulator`; if `clear` is
    high the accumulator is reset to zero instead.
    """
    def __init__(self):
        self.a_word = Signal(32)
        self.b_word = Signal(32)
        self.accumulator = Signal(signed(32))
        self.enable = Signal()
        self.clear = Signal()
    def elab(self, m):
        # Split each input word into four signed byte lanes.
        a_bytes = [self.a_word[i:i+8].as_signed() for i in range(0, 32, 8)]
        b_bytes = [self.b_word[i:i+8].as_signed() for i in range(0, 32, 8)]
        calculations = ((a + 128) * b for a, b in zip(a_bytes, b_bytes))
        summed = sum(calculations)
        with m.If(self.enable):
            m.d.sync += self.accumulator.eq(self.accumulator + summed)
        # The clear block is written after the enable block, so when both
        # are asserted the clear assignment takes precedence (in Amaranth,
        # a later assignment in the same domain overrides an earlier one).
        with m.If(self.clear):
            m.d.sync += self.accumulator.eq(0)
class WordMultiplyAccumulateTest(TestBase):
    """Cycle-accurate simulation test for WordMultiplyAccumulate."""
    def create_dut(self):
        return WordMultiplyAccumulate()
    def test(self):
        # Pack helpers: the -128 offset in `a` cancels the DUT's +128.
        def a(a, b, c, d): return pack_vals(a, b, c, d, offset=-128)
        def b(a, b, c, d): return pack_vals(a, b, c, d, offset=0)
        DATA = [
            # (a_word, b_word, enable, clear), expected accumulator
            ((a(0, 0, 0, 0), b(0, 0, 0, 0), 0, 0), 0),
            # Simple tests: with just first byte
            ((a(10, 0, 0, 0), b(3, 0, 0, 0), 1, 0), 0),
            ((a(11, 0, 0, 0), b(-4, 0, 0, 0), 1, 0), 30),
            ((a(11, 0, 0, 0), b(-4, 0, 0, 0), 0, 0), -14),
            # Since was not enabled last cycle, accumulator will not change
            ((a(11, 0, 0, 0), b(-4, 0, 0, 0), 1, 0), -14),
            # Since was enabled last cycle, will change accumulator
            ((a(11, 0, 0, 0), b(-4, 0, 0, 0), 0, 1), -58),
            # Accumulator cleared
            ((a(11, 0, 0, 0), b(-4, 0, 0, 0), 0, 0), 0),
            # Uses all bytes (calculated on a spreadsheet)
            ((a(99, 22, 2, 1), b(-2, 6, 7, 111), 1, 0), 0),
            ((a(2, 45, 79, 22), b(-33, 6, -97, -22), 1, 0), 59),
            ((a(23, 34, 45, 56), b(-128, -121, 119, 117), 1, 0), -7884),
            ((a(188, 34, 236, 246), b(-87, 56, 52, -117), 1, 0), -3035),
            ((a(131, 92, 21, 83), b(-114, -72, -31, -44), 1, 0), -33997),
            ((a(74, 68, 170, 39), b(102, 12, 53, -128), 1, 0), -59858),
            ((a(16, 63, 1, 198), b(29, 36, 106, 62), 1, 0), -47476),
            ((a(0, 0, 0, 0), b(0, 0, 0, 0), 0, 1), -32362),
            # Interesting bug
            ((a(128, 0, 0, 0), b(-104, 0, 0, 0), 1, 0), 0),
            ((a(0, 51, 0, 0), b(0, 43, 0, 0), 1, 0), -13312),
            ((a(0, 0, 97, 0), b(0, 0, -82, 0), 1, 0), -11119),
            ((a(0, 0, 0, 156), b(0, 0, 0, -83), 1, 0), -19073),
            ((a(0, 0, 0, 0), b(0, 0, 0, 0), 1, 0), -32021),
        ]
        dut = self.dut
        def process():
            for (a_word, b_word, enable, clear), expected in DATA:
                yield dut.a_word.eq(a_word)
                yield dut.b_word.eq(b_word)
                yield dut.enable.eq(enable)
                yield dut.clear.eq(clear)
                yield Delay(0.1)  # Wait for input values to settle
                # Check on accumulator, as calculated last cycle
                self.assertEqual(expected, (yield dut.accumulator))
                yield Tick()
        self.run_sim(process)
runTests(WordMultiplyAccumulateTest)
"""
Explanation: WordMultiplyAccumulate
Adds an accumulator to the four-way multiply and add operation.
Includes an enable signal to control when accumulation takes place and a clear signal to rest the accumulator.
End of explanation
"""
class Macc4Instruction(InstructionBase):
    """Simple instruction that provides access to a WordMultiplyAccumulate.

    funct7 selects the operation:
    * 0: Reset accumulator
    * 1: 4-way multiply accumulate of in0 and in1.
    * 2: Read accumulator
    """
    def elab(self, m):
        # Build the submodule
        m.submodules.macc4 = macc4 = WordMultiplyAccumulate()
        # Inputs to the macc4: the two CFU operand words.
        m.d.comb += macc4.a_word.eq(self.in0)
        m.d.comb += macc4.b_word.eq(self.in1)
        # Only function 2 has a defined response, so we can
        # unconditionally set it.
        m.d.comb += self.output.eq(macc4.accumulator)
        with m.If(self.start):
            m.d.comb += [
                # We can always return control to the CPU on next cycle
                self.done.eq(1),
                # clear on function 0, enable on function 1
                macc4.clear.eq(self.funct7 == 0),
                macc4.enable.eq(self.funct7 == 1),
            ]
def make_cfu():
    """Build a CFU with Macc4Instruction registered as instruction 0."""
    return simple_cfu({0: Macc4Instruction()})
class CfuTest(CfuTestBase):
    """End-to-end test driving the CFU instruction interface."""
    def create_dut(self):
        return make_cfu()
    def test(self):
        "Tests that the CFU plumbs through to Macc4Instruction correctly"
        # Pack helpers: the -128 offset in `a` cancels the DUT's +128.
        def a(a, b, c, d): return pack_vals(a, b, c, d, offset=-128)
        def b(a, b, c, d): return pack_vals(a, b, c, d, offset=0)
        # These values were calculated with a spreadsheet
        DATA = [
            # ((fn3, fn7, op1, op2), result); None == result not checked
            ((0, 0, 0, 0), None), # reset
            ((0, 1, a(130, 7, 76, 47), b(104, -14, -24, 71)), None), # calculate
            ((0, 1, a(84, 90, 36, 191), b(109, 57, -50, -1)), None),
            ((0, 1, a(203, 246, 89, 178), b(-87, 26, 77, 71)), None),
            ((0, 1, a(43, 27, 78, 167), b(-24, -8, 65, 124)), None),
            ((0, 2, 0, 0), 59986), # read result
            ((0, 0, 0, 0), None), # reset
            ((0, 1, a(67, 81, 184, 130), b(81, 38, -116, 65)), None),
            ((0, 1, a(208, 175, 180, 198), b(-120, -70, 8, 11)), None),
            ((0, 1, a(185, 81, 101, 108), b(90, 6, -92, 83)), None),
            ((0, 1, a(219, 216, 114, 236), b(-116, -9, -109, -16)), None),
            ((0, 2, 0, 0), -64723), # read result
            ((0, 0, 0, 0), None), # reset
            ((0, 1, a(128, 0, 0, 0), b(-104, 0, 0, 0)), None),
            ((0, 1, a(0, 51, 0, 0), b(0, 43, 0, 0)), None),
            ((0, 1, a(0, 0, 97, 0), b(0, 0, -82, 0)), None),
            ((0, 1, a(0, 0, 0, 156), b(0, 0, 0, -83)), None),
            ((0, 2, a(0, 0, 0, 0), b(0, 0, 0, 0)), -32021),
        ]
        self.run_ops(DATA)
runTests(CfuTest)
"""
Explanation: CFU Wrapper
Wraps the preceding logic in a CFU. Uses funct7 to determine what function the WordMultiplyAccumulate unit should perform.
End of explanation
"""
class SyncAndComb(Elaboratable):
    """Outputs the most significant bit of a free-running 12-bit counter.

    Demonstrates mixing the synchronous (`sync`) and combinational
    (`comb`) domains in a single module.
    """

    def __init__(self):
        self.out = Signal(1)
        self.ports = [self.out]

    def elaborate(self, platform):
        m = Module()
        count = Signal(12)
        # Increment every clock edge; the counter wraps naturally.
        m.d.sync += count.eq(count + 1)
        # The top bit has a period of 4096 clock cycles.
        m.d.comb += self.out.eq(count[-1])
        return m
print(convert_elaboratable(SyncAndComb()))
"""
Explanation: Amaranth to Verilog Examples
These examples show Amaranth and the Verilog it is translated into.
SyncAndComb
Demonstrates synchronous and combinatorial logic with a simple component that outputs the high bit of a 12 bit counter.
End of explanation
"""
class ConditionalEnable(Elaboratable):
    """5-bit counter that increments on `up` and decrements on `down`.

    Because of the If/Elif structure, `up` takes priority: `down` is
    only honoured when `up` is low.  When neither is asserted the
    value holds.
    """
    def __init__(self):
        self.up = Signal()
        self.down = Signal()
        self.value = Signal(5)
        self.ports = [self.value, self.up, self.down]
    def elaborate(self, platform):
        m = Module()
        with m.If(self.up):
            m.d.sync += self.value.eq(self.value + 1)
        with m.Elif(self.down):
            m.d.sync += self.value.eq(self.value - 1)
        return m
print(convert_elaboratable(ConditionalEnable()))
"""
Explanation: Conditional Enable
Demonstrates Amaranth's equivalent to Verilog's if statement. A five bit counter is incremented when input signal up is high or decremented when down is high.
End of explanation
"""
class EdgeDetector(SimpleElaboratable):
    """Pulses `detected` for one cycle on each low-to-high transition of `input`."""

    def __init__(self):
        self.input = Signal()
        self.detected = Signal()
        self.ports = [self.input, self.detected]

    def elab(self, m):
        previous = Signal()
        # Register the input so it can be compared with last cycle's value.
        m.d.sync += previous.eq(self.input)
        # Rising edge: high now, low on the previous cycle.
        m.d.comb += self.detected.eq(self.input & ~previous)
class EdgeDetectorTestCase(TestBase):
    """Simulation test for EdgeDetector."""
    def create_dut(self):
        return EdgeDetector()
    def test_with_table(self):
        # Each row is (input level this cycle, expected `detected`).
        TEST_CASE = [
            (0, 0),
            (1, 1),
            (0, 0),
            (0, 0),
            (1, 1),
            (1, 0),
            (0, 0),
        ]
        def process():
            for (input, expected) in TEST_CASE:
                # Set input
                yield self.dut.input.eq(input)
                # Allow some time for signals to propagate
                yield Delay(0.1)
                self.assertEqual(expected, (yield self.dut.detected))
                yield
        self.run_sim(process)
runTests(EdgeDetectorTestCase)
print(convert_elaboratable(EdgeDetector()))
"""
Explanation: EdgeDetector
Simple edge detector, along with a test case.
End of explanation
"""
|
ananswam/bioscrape | inference examples/Multiple trajectories.ipynb | mit | %matplotlib inline
%config InlineBackend.figure_format = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 20
%matplotlib inline
import bioscrape as bs
from bioscrape.types import Model
from bioscrape.simulator import py_simulate_model
import numpy as np
import pylab as plt
import pandas as pd
# Load the birth/death toy model from its SBML description.
M = Model(sbml_filename = 'toy_sbml_model.xml')
"""
Explanation: Parameter identification example
Here is a simple toy model that we use to demonstrate the working of the inference package
$\emptyset \xrightarrow[]{k_1} X \; \; \; \; X \xrightarrow[]{d_1} \emptyset$
End of explanation
"""
timepoints = np.linspace(0,20,100)
# Deterministic trajectory of species X from the model.
result = py_simulate_model(timepoints, Model = M)['X']
num_trajectories = 10
exp_data = pd.DataFrame()
exp_data['timepoints'] = timepoints
# Synthesize "experimental" replicates: the model trajectory plus
# Gaussian noise with non-zero mean (5) and std-dev 2.
for i in range(num_trajectories):
    exp_data['X' + str(i)] = result + np.random.normal(5, 2, size = np.shape(result))
    plt.plot(timepoints, exp_data['X' + str(i)], 'r', alpha = 0.3)
plt.plot(timepoints, result, 'k', linewidth = 3, label = 'Model')
plt.legend()
plt.xlabel('Time')
plt.ylabel('[X]')
plt.show()
"""
Explanation: Generate experimental data
Simulate bioscrape model
Add Gaussian noise of non-zero mean and non-zero variance to the simulation
Create appropriate Pandas dataframes
Write the data to a CSV file
End of explanation
"""
# Persist the synthetic data; it is read back per-trajectory below.
exp_data.to_csv('birth_death_data.csv')
exp_data
"""
Explanation: CSV looks like:
End of explanation
"""
from bioscrape.inference import py_inference
# Import data from CSV: one dataframe per trajectory, with columns
# renamed so every replicate exposes the same measurement name 'X'.
exp_data = []
for i in range(num_trajectories):
    df = pd.read_csv('birth_death_data.csv', usecols = ['timepoints', 'X'+str(i)])
    df.columns = ['timepoints', 'X']
    exp_data.append(df)
# Uniform priors over both parameters: k1 in [0, 100], d1 in [0, 10].
prior = {'k1' : ['uniform', 0, 100],'d1' : ['uniform',0,10]}
# Run ensemble MCMC (5 walkers, 4000 steps) against a deterministic
# simulation of the model.
sampler, pid = py_inference(Model = M, exp_data = exp_data, measurements = ['X'], time_column = ['timepoints'],
                            nwalkers = 5, init_seed = 0.15, nsteps = 4000, sim_type = 'deterministic',
                            params_to_estimate = ['k1', 'd1'], prior = prior)
pid.plot_mcmc_results(sampler);
"""
Explanation: Run the bioscrape MCMC algorithm to identify parameters from the experimental data
End of explanation
"""
# NOTE(review): this loads 'toy_model.xml' positionally while the model
# above was loaded as Model(sbml_filename='toy_sbml_model.xml') —
# confirm the filename/argument are intended.
M_fit = Model('toy_model.xml')
timepoints = pid.timepoints[0]
# Discard burn-in and thin the chain, then plot 200 posterior samples.
flat_samples = sampler.get_chain(discard=200, thin=15, flat=True)
inds = np.random.randint(len(flat_samples), size=200)
for ind in inds:
    sample = flat_samples[ind]
    # Push the sampled parameter values into the model and re-simulate.
    for pi, pi_val in zip(pid.params_to_estimate, sample):
        M_fit.set_parameter(pi, pi_val)
    plt.plot(timepoints, py_simulate_model(timepoints, Model= M_fit)['X'], "C1", alpha=0.6)
# plt.errorbar(, y, yerr=yerr, fmt=".k", capsize=0)
# Overlay the noisy data and the original (true) model trajectory.
for i in range(num_trajectories):
    plt.plot(timepoints, list(pid.exp_data[i]['X']), 'b', alpha = 0.1)
plt.plot(timepoints, result, "k", label="original model")
plt.legend(fontsize=14)
plt.xlabel("Time")
plt.ylabel("[X]");
plt.close()
plt.title('Log-likelihood progress')
plt.plot(pid.cost_progress)
plt.xlabel('Steps (all chains)')
plt.show()
"""
Explanation: Check mcmc_results.csv for the results of the MCMC procedure and perform your own analysis.
OR
You can also plot the results as follows
End of explanation
"""
|
molgor/spystats | notebooks/global_variogram.ipynb | bsd-2-clause | # Load Biospytial modules and etc.
%matplotlib inline
import sys
sys.path.append('/apps')
import django
django.setup()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
## Use the ggplot style
plt.style.use('ggplot')
from external_plugins.spystats import tools
%run ../testvariogram.py
%time vg = tools.Variogram(new_data,'residuals1',using_distance_threshold=500000)
### Test creation of chunks
chunks = tools.PartitionDataSet(new_data,namecolumnx='newLon',namecolumny='newLat',n_chunks=3)
sizes = map(lambda c : c.shape[0],chunks)
vg0 = tools.Variogram(chunks[0],response_variable_name='residuals1',using_distance_threshold=500000)
vg1 = tools.Variogram(chunks[1],response_variable_name='residuals1',using_distance_threshold=500000)
vg2 = tools.Variogram(chunks[2],response_variable_name='residuals1',using_distance_threshold=500000)
vg3 = tools.Variogram(chunks[3],response_variable_name='residuals1',using_distance_threshold=500000)
"""
Explanation: Process the global variogram
End of explanation
"""
# Plot each chunk's empirical variogram with its Monte-Carlo envelope, plus a
# map of the residuals for that chunk.
%time vg0.plot(num_iterations=50,with_envelope=True)
chunks[0].plot(column='residuals1')
%time vg1.plot(num_iterations=50,with_envelope=True)
chunks[1].plot(column='residuals1')
%time vg2.plot(num_iterations=50,with_envelope=True)
chunks[2].plot(column='residuals1')
%time vg3.plot(num_iterations=50,with_envelope=True)
chunks[3].plot(column='residuals1')

# This first assignment is immediately overwritten two lines below; the
# envelopes actually plotted come from the Variogram objects.
envelopes = map(lambda c : c.envelope,chunks)
c = chunks[0]
variograms = [vg0,vg1,vg2,vg3]
envelopes = map(lambda v : v.envelope,variograms)
# rcParams cycle fetched but replaced by an explicit per-chunk color list.
colors = plt.rcParams['axes.prop_cycle']
colors = ['red','green','grey','orange']
plt.figure(figsize=(12, 6))
for i,envelope in enumerate(envelopes):
    # Dashed black lines bound the envelope (97.5% / 2.5% per the legend);
    # the filled band and dotted line show each chunk's empirical variogram.
    plt.plot(envelope.lags,envelope.envhigh,'k--')
    plt.plot(envelope.lags,envelope.envlow,'k--')
    plt.fill_between(envelope.lags,envelope.envlow,envelope.envhigh,alpha=0.5,color=colors[i])
    plt.plot(envelope.lags,envelope.variogram,'o--',lw=2.0,color=colors[i])
plt.legend(labels=['97.5%','emp. varig','2.5%'])
"""
Explanation: STOP HERE This will calculate the variogram with chunks
End of explanation
"""
# Load the envelope precomputed on the HEC cluster for the global variogram.
filename = "../HEC_runs/results/low_q/data_envelope.csv"
envelope_data = pd.read_csv(filename)
"""
Explanation: Now the global variogram
For doing this I need to take a weighted average.
Or... you can run it on the HEC machine! (as I did)
End of explanation
"""
def gaussianVariogram(h, sill=0, range_a=0, nugget=0):
    """Evaluate the Gaussian semivariogram model at lag distance(s) h.

    gamma(h) = (sill - nugget) * (1 - exp(-h^2 / range_a^2)) + nugget

    Works element-wise when h is a numpy array. Note range_a must be
    non-zero, otherwise the exponent divides by zero.
    """
    partial_sill = sill - nugget
    decay = np.exp(-(h ** 2 / range_a ** 2))
    return partial_sill * (1 - decay) + nugget
hx = np.linspace(0,600000,100)
vg = tools.Variogram(new_data,'residuals1',using_distance_threshold=500000)
vg.envelope = envelope_data
vg.empirical = vg.envelope.variogram
vg.lags = vg.envelope.lags
vdata = vg.envelope.dropna()
from scipy.optimize import curve_fit
s = 0.345
r = 100000.0
nugget = 0.33
init_vals = [0.34, 50000, 0.33] # for [amp, cen, wid]
best_vals, covar = curve_fit(gaussianVariogram, xdata=vdata.lags, ydata=vdata.variogram, p0=init_vals)
s =best_vals[0]
r = best_vals[1]
nugget = best_vals[2]
fitted_gaussianVariogram = lambda x : gaussianVariogram(x,sill=s,range_a=r,nugget=nugget)
gammas = pd.DataFrame(map(fitted_gaussianVariogram,hx))
import functools
fitted_gaussian2 = functools.partial(gaussianVariogram,s,r,nugget)
print(s)
print(r)
print(nugget)
vg.plot(refresh=False)
plt.plot(hx,gammas,'green',lw=2)
Mdist = vg.distance_coordinates.flatten()
## Let's do a small subset
ch = Mdist[0:10000000]
#%time covariance_matrix = map(fitted_gaussianVariogram,Mdist)
%time vars = np.array(map(fitted_gaussianVariogram,ch))
plt.imshow(vars.reshape(1000,1000))
## Save it in redis
import redis
con = redis.StrictRedis(host='redis')
con.set('small_dist_mat1',vars)
import multiprocessing as multi
from multiprocessing import Manager
manager = Manager()
p=multi.Pool(processes=4)
%time vars = p.map(fitted_gaussian2,ch,chunksize=len(ch)/3)
%time vars = np.array(map(fitted_gaussianVariogram,ch))
88.36*30
"""
Explanation: Gaussian semivariogram
$\gamma(h) = (s - n)\left(1 - \exp\left(-\frac{h^{2}}{r^{2}}\right)\right) + n\,1_{(0,\infty)}(h)$
End of explanation
"""
|
dtamayo/MachineLearning | Day1/06_cross_validation.ipynb | gpl-3.0 | from sklearn.datasets import load_iris
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
# read in the iris data
iris = load_iris()
# create X (features) and y (response)
X = iris.data
y = iris.target
# use train/test split with different random_state values
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)
# check classification accuracy of KNN with K=5
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print metrics.accuracy_score(y_test, y_pred)
"""
Explanation: Cross-validation for parameter tuning, model selection, and feature selection
From the video series: Introduction to machine learning with scikit-learn
Agenda
What is the drawback of using the train/test split procedure for model evaluation?
How does K-fold cross-validation overcome this limitation?
How can cross-validation be used for selecting tuning parameters, choosing between models?
What are some possible improvements to cross-validation?
How to ensure cross-validation is correctly done.
Review of model evaluation procedures
Motivation: Need a way to choose between machine learning models
Goal is to estimate likely performance of a model on out-of-sample data
Initial idea: Train and test on the same data
But, maximizing training accuracy rewards overly complex models which overfit the training data
Alternative idea: Train/test split
Split the dataset into two pieces, so that the model can be trained and tested on different data
Testing accuracy is a better estimate than training accuracy of out-of-sample performance
But, it provides a high variance estimate since changing which observations happen to be in the testing set can significantly change testing accuracy
End of explanation
"""
# simulate splitting a dataset of 25 observations into 5 folds
from sklearn.cross_validation import KFold
kf = KFold(25, n_folds=5, shuffle=False)

# print the contents of each training and testing set
# (each row is one CV iteration: 20 training indices, 5 testing indices;
# every observation appears in the testing set exactly once)
print '{} {:^61} {}'.format('Iteration', 'Training set observations', 'Testing set observations')
for iteration, data in enumerate(kf, start=1):
    print '{:^9} {} {:^25}'.format(iteration, data[0], data[1])
"""
Explanation: Question: What if we created a bunch of train/test splits, calculated the testing accuracy for each, and averaged the results together?
Answer: That's the essense of K-fold cross-validation!
Steps for K-fold cross-validation
Split the dataset into K equal partitions (or "folds").
Use fold 1 as the testing set and the union of the other folds as the training set.
Calculate testing accuracy.
Repeat steps 2 and 3 K times, using a different fold as the testing set each time.
Use the average testing accuracy as the estimate of out-of-sample accuracy.
Diagram of 5-fold cross-validation:
End of explanation
"""
from sklearn.cross_validation import cross_val_score
# 10-fold cross-validation with K=5 for KNN (the n_neighbors parameter)
knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')
print scores
# use average accuracy as an estimate of out-of-sample accuracy
print scores.mean()
# search for an optimal value of K for KNN
k_range = range(1, 31)
k_scores = []
import matplotlib.pyplot as plt
%matplotlib inline
# plot the value of K for KNN (x-axis) versus the cross-validated accuracy (y-axis)
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
"""
Explanation: Dataset contains 25 observations (numbered 0 through 24)
5-fold cross-validation, thus it runs for 5 iterations
For each iteration, every observation is either in the training set or the testing set, but not both
Every observation is in the testing set exactly once
Comparing cross-validation to train/test split
Advantages of cross-validation:
More accurate estimate of out-of-sample accuracy
More "efficient" use of data (every observation is used for both training and testing)
Advantages of train/test split:
Runs K times faster than K-fold cross-validation
Simpler to examine the detailed results of the testing process
Cross-validation recommendations
K can be any number, but K=10 is generally recommended
For classification problems, stratified sampling is recommended for creating the folds
Each response class should be represented with equal proportions in each of the K folds
scikit-learn's cross_val_score function does this by default
Cross-validation example: parameter tuning
Goal: Select the best tuning parameters (aka "hyperparameters") for KNN on the iris dataset
End of explanation
"""
# 10-fold cross-validation with the best KNN model
# (K=20 is the value selected by the tuning search in the previous section)
knn = KNeighborsClassifier(n_neighbors=20)
print cross_val_score(knn, X, y, cv=10, scoring='accuracy').mean()

# 10-fold cross-validation with logistic regression, for comparison
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
print cross_val_score(logreg, X, y, cv=10, scoring='accuracy').mean()
"""
Explanation: Cross-validation example: model selection
Goal: Compare the best KNN model with logistic regression on the iris dataset
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline

# Re-render the K-vs-accuracy curve from the tuning section; relies on
# k_range and k_scores already being defined by the earlier cell.
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
"""
Explanation: Improvements to cross-validation
Repeated cross-validation
Repeat cross-validation multiple times (with different random splits of the data) and average the results
More reliable estimate of out-of-sample performance by reducing the variance associated with a single trial of cross-validation
Creating a hold-out set
"Hold out" a portion of the data before beginning the model building process
Locate the best model using cross-validation on the remaining data, and test it using the hold-out set
More reliable estimate of out-of-sample performance since hold-out set is truly out-of-sample
Splitting a Second Time? Train/Cross-Validation/Test Split
End of explanation
"""
|
KrisCheng/ML-Learning | archive/MOOC/Deeplearning_AI/ImprovingDeepNeuralNetworks/SettingupyourMachineLearningApplication/Gradient+Checking.ipynb | mit | # Packages
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
"""
Explanation: Gradient Checking
Welcome to the final assignment for this week! In this assignment you will learn to implement and use gradient checking.
You are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect fraud--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker.
But backpropagation is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, "Give me a proof that your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking".
Let's do it!
End of explanation
"""
# GRADED FUNCTION: forward_propagation

def forward_propagation(x, theta):
    """
    Compute the 1D linear model J(theta) = theta * x (Figure 1).

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    J -- the value of J(theta) = theta * x
    """
    return theta * x


# Quick sanity check of the forward pass.
x, theta = 2, 4
J = forward_propagation(x, theta)
print ("J = " + str(J))
"""
Explanation: 1) How does gradient checking work?
Backpropagation computes the gradients $\frac{\partial J}{\partial \theta}$, where $\theta$ denotes the parameters of the model. $J$ is computed using forward propagation and your loss function.
Because forward propagation is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\frac{\partial J}{\partial \theta}$.
Let's look back at the definition of a derivative (or gradient):
$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$
If you're not familiar with the "$\displaystyle \lim_{\varepsilon \to 0}$" notation, it's just a way of saying "when $\varepsilon$ is really really small."
We know the following:
$\frac{\partial J}{\partial \theta}$ is what you want to make sure you're computing correctly.
You can compute $J(\theta + \varepsilon)$ and $J(\theta - \varepsilon)$ (in the case that $\theta$ is a real number), since you're confident your implementation for $J$ is correct.
Lets use equation (1) and a small value for $\varepsilon$ to convince your CEO that your code for computing $\frac{\partial J}{\partial \theta}$ is correct!
2) 1-dimensional gradient checking
Consider a 1D linear function $J(\theta) = \theta x$. The model contains only a single real-valued parameter $\theta$, and takes $x$ as input.
You will implement code to compute $J(.)$ and its derivative $\frac{\partial J}{\partial \theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct.
<img src="images/1Dgrad_kiank.png" style="width:600px;height:250px;">
<caption><center> <u> Figure 1 </u>: 1D linear model<br> </center></caption>
The diagram above shows the key computation steps: First start with $x$, then evaluate the function $J(x)$ ("forward propagation"). Then compute the derivative $\frac{\partial J}{\partial \theta}$ ("backward propagation").
Exercise: implement "forward propagation" and "backward propagation" for this simple function. I.e., compute both $J(.)$ ("forward propagation") and its derivative with respect to $\theta$ ("backward propagation"), in two separate functions.
End of explanation
"""
# GRADED FUNCTION: backward_propagation

def backward_propagation(x, theta):
    """
    Derivative of J(theta) = theta * x with respect to theta (Figure 1).

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    dtheta -- the gradient of the cost with respect to theta, i.e. x
    """
    return x


# Quick sanity check of the backward pass.
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print ("dtheta = " + str(dtheta))
"""
Explanation: Expected Output:
<table style=>
<tr>
<td> ** J ** </td>
<td> 8</td>
</tr>
</table>
Exercise: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\theta) = \theta x$ with respect to $\theta$. To save you from doing the calculus, you should get $dtheta = \frac { \partial J }{ \partial \theta} = x$.
End of explanation
"""
# GRADED FUNCTION: gradient_check

def gradient_check(x, theta, epsilon = 1e-7):
    """
    Verify backward_propagation's gradient using the centered-difference
    approximation of formula (1).

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well
    epsilon -- tiny shift to the input to compute the approximated gradient

    Returns:
    difference -- relative difference (2) between the approximated gradient
                  and the backward propagation gradient
    """
    # Numerical estimate: (J(theta + eps) - J(theta - eps)) / (2 * eps).
    J_plus = forward_propagation(x, theta + epsilon)
    J_minus = forward_propagation(x, theta - epsilon)
    gradapprox = (J_plus - J_minus) / (2 * epsilon)

    # Analytical gradient from backprop.
    grad = backward_propagation(x, theta)

    # Relative difference: ||grad - gradapprox|| / (||grad|| + ||gradapprox||).
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference < 1e-7:
        print ("The gradient is correct!")
    else:
        print ("The gradient is wrong!")

    return difference


x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
"""
Explanation: Expected Output:
<table>
<tr>
<td> ** dtheta ** </td>
<td> 2 </td>
</tr>
</table>
Exercise: To show that the backward_propagation() function is correctly computing the gradient $\frac{\partial J}{\partial \theta}$, let's implement gradient checking.
Instructions:
- First compute "gradapprox" using the formula above (1) and a small value of $\varepsilon$. Here are the Steps to follow:
1. $\theta^{+} = \theta + \varepsilon$
2. $\theta^{-} = \theta - \varepsilon$
3. $J^{+} = J(\theta^{+})$
4. $J^{-} = J(\theta^{-})$
5. $gradapprox = \frac{J^{+} - J^{-}}{2 \varepsilon}$
- Then compute the gradient using backward propagation, and store the result in a variable "grad"
- Finally, compute the relative difference between "gradapprox" and the "grad" using the following formula:
$$ difference = \frac {\mid\mid grad - gradapprox \mid\mid_2}{\mid\mid grad \mid\mid_2 + \mid\mid gradapprox \mid\mid_2} \tag{2}$$
You will need 3 Steps to compute this formula:
- 1'. compute the numerator using np.linalg.norm(...)
- 2'. compute the denominator. You will need to call np.linalg.norm(...) twice.
- 3'. divide them.
- If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation.
End of explanation
"""
def forward_propagation_n(X, Y, parameters):
    """
    Run the LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID forward
    pass and compute the logistic cost (Figure 3).

    Arguments:
    X -- training set for m examples
    Y -- labels for m examples
    parameters -- python dictionary containing "W1", "b1", "W2", "b2", "W3", "b3":
                  W1 (5, 4), b1 (5, 1), W2 (3, 5), b2 (3, 1), W3 (1, 3), b3 (1, 1)

    Returns:
    cost -- the logistic cost averaged over the m examples
    cache -- every intermediate value, in the order backward_propagation_n expects
    """
    m = X.shape[1]

    # Unpack the weights and biases of the three layers.
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # Layer 1: LINEAR -> RELU
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    # Layer 2: LINEAR -> RELU
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    # Layer 3: LINEAR -> SIGMOID
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Cross-entropy cost, averaged over the batch.
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1./m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)

    return cost, cache
"""
Explanation: Expected Output:
The gradient is correct!
<table>
<tr>
<td> ** difference ** </td>
<td> 2.9193358103083e-10 </td>
</tr>
</table>
Congrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in backward_propagation().
Now, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it!
3) N-dimensional gradient checking
The following figure describes the forward and backward propagation of your fraud detection model.
<img src="images/NDgrad_kiank.png" style="width:600px;height:400px;">
<caption><center> <u> Figure 2 </u>: deep neural network<br>LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID</center></caption>
Let's look at your implementations for forward propagation and backward propagation.
End of explanation
"""
def backward_propagation_n(X, Y, cache):
    """
    Implement the backward propagation presented in figure 2.

    Arguments:
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- A dictionary with the gradients of the cost with respect to
                 each parameter, activation and pre-activation variables.
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    # Output layer: dZ3 = A3 - Y is the combined sigmoid + cross-entropy derivative.
    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)

    # Hidden layer 2: np.int64(A2 > 0) is the ReLU derivative mask.
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1./m * np.dot(dZ2, A1.T)
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)

    # Hidden layer 1: same pattern one layer down.
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1./m * np.dot(dZ1, X.T)
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
"""
Explanation: Now, run backward propagation.
End of explanation
"""
# GRADED FUNCTION: gradient_check_n

def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
    """
    Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n

    Arguments:
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
    grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters.
    x -- input datapoint, of shape (input size, 1)
    y -- true "label"
    epsilon -- tiny shift to the input to compute approximated gradient with formula(1)

    Returns:
    difference -- difference (2) between the approximated gradient and the backward propagation gradient
    """

    # Set-up variables: flatten the parameter dict into one column vector so
    # each scalar entry can be perturbed independently.
    parameters_values, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Compute gradapprox: two full forward passes per parameter entry.
    for i in range(num_parameters):
        # Compute J_plus[i]: cost with entry i nudged up by epsilon.
        # "_" discards the cache; only the cost is needed here.
        thetaplus = np.copy(parameters_values)                                      # Step 1
        thetaplus[i][0] = thetaplus[i][0] + epsilon                                 # Step 2
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus)) # Step 3

        # Compute J_minus[i]: cost with entry i nudged down by epsilon.
        thetaminus = np.copy(parameters_values)                                       # Step 1
        thetaminus[i][0] = thetaminus[i][0] - epsilon                                 # Step 2
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3

        # Centered-difference estimate of the gradient for entry i.
        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)

    # Compare gradapprox to backward propagation gradients via the relative
    # difference ||grad - gradapprox|| / (||grad|| + ||gradapprox||).
    numerator = np.linalg.norm(grad - gradapprox)                     # Step 1'
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)   # Step 2'
    difference = numerator / denominator                              # Step 3'

    if difference > 1e-7:
        print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    else:
        print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")

    return difference


# Run the full check on a fixed test case.
X, Y, parameters = gradient_check_n_test_case()

cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
"""
Explanation: You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct.
How does gradient checking work?.
As in 1) and 2), you want to compare "gradapprox" to the gradient computed by backpropagation. The formula is still:
$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$
However, $\theta$ is not a scalar anymore. It is a dictionary called "parameters". We implemented a function "dictionary_to_vector()" for you. It converts the "parameters" dictionary into a vector called "values", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.
The inverse function is "vector_to_dictionary" which outputs back the "parameters" dictionary.
<img src="images/dictionary_to_vector.png" style="width:600px;height:400px;">
<caption><center> <u> Figure 2 </u>: dictionary_to_vector() and vector_to_dictionary()<br> You will need these functions in gradient_check_n()</center></caption>
We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector(). You don't need to worry about that.
Exercise: Implement gradient_check_n().
Instructions: Here is pseudo-code that will help you implement the gradient check.
For each i in num_parameters:
- To compute J_plus[i]:
1. Set $\theta^{+}$ to np.copy(parameters_values)
2. Set $\theta^{+}_i$ to $\theta^{+}_i + \varepsilon$
3. Calculate $J^{+}_i$ using to forward_propagation_n(x, y, vector_to_dictionary($\theta^{+}$ )).
- To compute J_minus[i]: do the same thing with $\theta^{-}$
- Compute $gradapprox[i] = \frac{J^{+}_i - J^{-}_i}{2 \varepsilon}$
Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to parameter_values[i]. You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute:
$$ difference = \frac {\| grad - gradapprox \|_2}{\| grad \|_2 + \| gradapprox \|_2 } \tag{3}$$
End of explanation
"""
|
brianspiering/word2vec-talk | word2vec_demo.ipynb | apache-2.0 | reset -fs
import collections
import math
import os
from pprint import pprint
import random
import urllib.request
import zipfile
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.manifold import TSNE
%matplotlib inline
"""
Explanation: Apply word2vec to dataset
Overview:
Download some training data
Setup word2vec model in TensorFlow
Train on the data a bit
Visualize the result
This notebook is based on a TensorFlow tutorial.
End of explanation
"""
url = 'http://mattmahoney.net/dc/'


def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    if not os.path.exists(filename):
        # urlretrieve returns (local_path, headers); keep only the path.
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    actual_bytes = os.stat(filename).st_size
    if actual_bytes != expected_bytes:
        print(actual_bytes)
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    print('Found and verified', filename)
    return filename
# Fetch the text8 corpus (~31 MB zip) if it is not already on disk.
filename = maybe_download('text8.zip', 31344016)
"""
Explanation: Step 1: Download the data.
End of explanation
"""
# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words"""
    with zipfile.ZipFile(filename) as f:
        # tf.compat.as_str normalizes bytes -> str across Python versions;
        # split() tokenizes on whitespace.
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data

words = read_data(filename)
print('Dataset size: {:,} words'.format(len(words)))

# Let's have a peek at the head
words[:5]

# Let's have a peek at the tail
words[-5:]
"""
Explanation: ☝️ While this code is running, preview the code ahead.
End of explanation
"""
vocabulary_size = 50000


def build_dataset(words):
    """Map each word to an integer id, replacing rare words with 'UNK'.

    Keeps the (vocabulary_size - 1) most frequent words; every other word is
    mapped to id 0 ('UNK' -- a dustbin category that sweeps the small-count
    items into a single group).

    Returns:
        data -- list of word ids, one per input word
        count -- [word, frequency] pairs, 'UNK' first
        dictionary -- word -> id
        reverse_dictionary -- id -> word
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))

    # Ids are assigned by frequency rank, so 'UNK' gets 0.
    dictionary = {}
    for word, _ in count:
        dictionary[word] = len(dictionary)

    data = []
    unk_count = 0
    for word in words:
        index = dictionary.get(word)
        if index is None:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count

    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words  # Reduce memory by getting rid of the "heavy" list of strings

data[:5]  # An index of each word (as it appears in order) to its rank. Therefore we don't have to reference the string

dictionary['the']  # word: rank

reverse_dictionary[42]  # rank: word

print('Most common words:')
pprint(count[:5])

# FIX: this label read 'Most least words:', which is garbled and contradicts
# the slice below -- count[-5:] holds the *least* common vocabulary entries.
print('Least common words:')
pprint(count[-5:])
"""
Explanation: Notice: None of the words are capitalized and there is no punctuation.
Preprocessing is an art. It depends on your raw data and how you plan to use your final data. You might want to encode ngrams, instead of just splitting all items into unigrams.
Step 2: Build the dictionary
End of explanation
"""
data_index = 0  # module-level cursor into `data`; advances across calls

def generate_batch(batch_size, num_skips, skip_window):
    """Generate one skip-gram training batch from the global `data` stream.

    batch_size -- number of (center, context) pairs to produce
    num_skips -- how many context words to sample per center word
    skip_window -- how many words to consider left and right of the center

    Returns (batch, labels): center-word ids and sampled context-word ids.
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1 # [ skip_window target skip_window ]
    # Sliding window over the stream; the deque drops the oldest word itself.
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)  # wrap around at the end
    for i in range(batch_size // num_skips):
        target = skip_window # target label at the center of the buffer
        targets_to_avoid = [ skip_window ]
        for j in range(num_skips):
            # Rejection-sample a context position not used yet for this
            # center word (and never the center itself).
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        # Slide the window one word to the right for the next center word.
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    return batch, labels

batch, labels = generate_batch(batch_size=8,
                               num_skips=2,
                               skip_window=1)
"""
Explanation: Step 3: Function to generate a training batch for the skip-gram model.
End of explanation
"""
batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16     # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64    # Number of negative examples to sample.

# TensorFlow setup
graph = tf.Graph()

with graph.as_default():

    # Input data
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    with tf.device('/cpu:0'):
        # Look up embeddings for inputs.
        embeddings = tf.Variable(
            tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)

        # Construct the variables for the NCE loss
        nce_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
                       num_sampled, vocabulary_size))

    # Construct the SGD optimizer using a learning rate of 1.0.
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # Compute the cosine similarity between minibatch examples and all embeddings.
    # Dividing by the L2 norm first makes the matmul below a cosine similarity.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(
        normalized_embeddings, valid_dataset)
    similarity = tf.matmul(
        valid_embeddings, normalized_embeddings, transpose_b=True)

    # Add variable initializer.
    # NOTE(review): initialize_all_variables() is the TF <= 0.11 spelling; it
    # was renamed to global_variables_initializer() in later releases.
    init = tf.initialize_all_variables()
"""
Explanation: Step 4: Build and train a skip-gram model.
End of explanation
"""
num_steps = 1000  # 100001 in the original tutorial; reduced so the demo runs quickly

with tf.Session(graph=graph) as session:
    # We must initialize all variables before we use them.
    init.run()
    print("Initialized")

    average_loss = 0
    for step in range(num_steps):
        batch_inputs, batch_labels = generate_batch(
            batch_size, num_skips, skip_window)
        feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}

        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run()
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val

        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
            # The average loss is an estimate of the loss over the last 2000 batches.
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0

        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % 10000 == 0:
            sim = similarity.eval()
            for i in range(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                # Start the slice at 1: the nearest "neighbor" is the word itself.
                nearest = (-sim[i, :]).argsort()[1:top_k+1]
                log_str = "Nearest to '%s':" % valid_word
                for k in range(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)

    final_embeddings = normalized_embeddings.eval()
"""
Explanation: Step 5: Begin training.
End of explanation
"""
# Project the first 500 embeddings down to 2-D for visualization.
tsne = TSNE(perplexity=30,
            n_components=2,
            init='pca',
            n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in range(plot_only)]

# Scatter a small subset and annotate each point with its word.
n_words_to_visualize = 40

for i, label in enumerate(labels[:n_words_to_visualize]):
    x, y = low_dim_embs[i,:]
    plt.scatter(x, y)
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')
"""
Explanation: Step 6: Visualize the embeddings.
We'll use t-sne.
t-sne is a cool way to visualize high-dimensional datasets by reducing the number of dimensions.
End of explanation
"""
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    """Scatter-plot 2-D embeddings, annotate each point with its word, and save the figure."""
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18, 18))  # in inches
    for label, (x, y) in zip(labels, low_dim_embs):
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in range(plot_only)]
plot_with_labels(low_dim_embs, labels)
"""
Explanation: Reflective Questions
What do you see?
How would you describe the relationships?
Let's render and save more samples.
End of explanation
"""
|
luctrudeau/Teaching | AsyncIOisAwesome/AsyncIOisAwesome.ipynb | lgpl-3.0 | import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Asynchronous IO is Awesome
End of explanation
"""
# Benchmark samples for three server variants: single-threaded, thread-per-
# connection ("N Threads"), and Tornado.
# NOTE(review): the units of these samples are not documented in this notebook
# (variable names suggest interrupt counts per interval) — confirm against the
# measurement harness.
interupts1 = [4898, 4708, 4698, 4730, 4614, 4679, 4686, 4739, 4690, 4743, 3250, 4217]
interuptsN = [3299, 4328, 4498, 4346, 4412, 4417, 4321, 4493, 4514, 4432, 4366, 4519]
interuptsT = [3373, 4287, 4215, 4212, 4258, 4332, 4139, 4132, 4351, 4184, 4251, 4192]
plt.plot(interupts1)
plt.plot(interuptsN)
plt.plot(interuptsT)
plt.legend(['1 Thread', 'N Threads', 'Tornado'], loc='center left', bbox_to_anchor=(1, 0.5))

# Resident memory over the same run (values look like bytes — confirm).
ram = [8880128, 8880128, 8880128, 8880128, 8880128, 8880128, 8880128, 8880128, 8880128, 8880128, 8880128]
ramN = [18079744, 22675456, 25309184, 28704768, 26828800, 24875008, 24223744, 24932352, 25497600, 24223744, 23556096]
ramT = [12206080, 12288000, 12349440, 12562432, 12627968, 12648448, 12693504, 12750848, 12836864, 13062144, 13099008, 13127680]
plt.plot(ram)
plt.plot(ramN)
plt.plot(ramT)
plt.legend(['1 Thread', 'N Threads', 'Tornado'], loc='center left', bbox_to_anchor=(1, 0.5))

# Thread counts: only the thread-per-connection server spawns extra threads.
threads1 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
threadsN = [18, 23, 28, 12, 11, 30, 29, 17, 10, 30,28]
threadsT = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
plt.plot(threads1)
plt.plot(threadsN)
plt.plot(threadsT)
plt.legend(['1 Thread', 'N Threads', 'Tornado'], loc='center left', bbox_to_anchor=(1, 0.5))

# CPU utilization (percent) for each variant.
cpu1 = [86, 83, 83, 82, 83, 82, 84, 83, 81, 49, 72];
cpuN = [83, 82, 82, 81, 82, 82, 82, 83, 82, 84, 81];
cpuT = [85, 83, 83, 83, 83, 83, 82, 84, 83, 83, 83];
plt.plot(cpu1)
plt.plot(cpuN)
plt.plot(cpuT)
plt.legend(['1 Thread', 'N Threads', 'Tornado'], loc='center left', bbox_to_anchor=(1, 0.5))
"""
Explanation: Serveur Mono Thread
End of explanation
"""
import socket
from IPython.display import Image

# Plain blocking Berkeley-socket HTTP client: connect, send a GET request,
# then read the response until the server closes the connection.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("192.168.3.14", 8080))
s.sendall(b'GET /car.jpg HTTP/1.1\r\n\r\n')

recvd = b''  # fixed: was a bare `recvd` expression, which raises NameError
while True:
    data = s.recv(1024)
    if not data:
        break
    recvd += data
s.shutdown(1)
s.close()

# Split the raw response into headers and body at the first blank line.
data = recvd.split(b'\r\n\r\n', 1)
print(data[0].decode())
print(len(data[1]))
Image(data=data[1])
"""
Explanation: Le Socket de Berkley
End of explanation
"""
import selectors
import socket

sel = selectors.DefaultSelector()

# Connector: fires once the socket becomes writable (connect completed).
# Moved above sel.register(), which previously referenced `connected`
# before it was defined and raised NameError.
def connected(sock, mask):
    print('Connected!')
    sock.sendall(b'GET /car.jpg HTTP/1.1\r\n\r\n')
    # NOTE(review): `read` is only defined in the next cell; running this
    # cell on its own would fail inside this callback.
    sel.modify(sock, selectors.EVENT_READ, read)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect while still blocking, THEN switch to non-blocking I/O — matching
# the complete example below; calling connect() on an already non-blocking
# socket raises BlockingIOError while the connection is in progress.
s.connect(("192.168.3.14", 8080))
s.setblocking(False)
sel.register(s, selectors.EVENT_WRITE, connected)

# Connection: wait for the writable event and dispatch the callback.
events = sel.select()
for key, mask in events:
    print(key)
    callback = key.data
    callback(key.fileobj, mask)
"""
Explanation: Le problème est que s.connect(("192.168.3.14" , 8080)), s.sendall(message) et s.recv(1024) sont bloquants.
Alors pendant ce temps le Thread attends
S'il se trouve dans un Threadpool, cela peut paralyser le système.
IO Asynchrone (Selector / Reator)
Acceptor-Connector
End of explanation
"""
import selectors
import socket
from IPython.display import Image

sel = selectors.DefaultSelector()

# Connector
def connected(sock, mask):
    """Called once the socket is writable: send the request, then wait for the reply."""
    print('Connected!')
    sock.sendall(b'GET /car.jpg HTTP/1.1\r\n\r\n')
    # Hand the socket over to the read handler for the response.
    sel.modify(sock, selectors.EVENT_READ, read)

# Handler
def read(sock, mask):
    """Called whenever the socket is readable; returns b'' once the server closes."""
    data = sock.recv(1024)
    if not data:
        print('Done!')
        sel.unregister(sock)
    return data

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("192.168.3.14" , 8080))
s.setblocking(False)  # connect first (blocking), then switch to non-blocking I/O
sel.register(s, selectors.EVENT_WRITE, connected)
recvd = b''

# Connection
events = sel.select()
for key, mask in events:
    callback = key.data
    callback(key.fileobj, mask)

# Le Reactor
# Event loop: dispatch readiness events to their registered callbacks until
# the read handler reports end-of-stream (empty bytes).
loop = True
while loop:
    events = sel.select()
    for key, mask in events:
        callback = key.data
        data = callback(key.fileobj, mask)
        if not data:
            loop = False
        recvd += data

# Split the accumulated response into headers and body.
data = recvd.split(b'\r\n\r\n', 1)
print(data[0].decode())
Image(data=data[1])
"""
Explanation: Le Connector enregistre le prochain handler
sel.modify(sock, selectors.EVENT_READ, read)
End of explanation
"""
|
pombredanne/gensim | docs/notebooks/doc2vec-lee.ipynb | lgpl-2.1 | import gensim
import os
import collections
import random
"""
Explanation: Doc2Vec Tutorial on the Lee Dataset
End of explanation
"""
# Set file names for train and test data
test_data_dir = '{}'.format(os.sep).join([gensim.__path__[0], 'test', 'test_data'])
lee_train_file = test_data_dir + os.sep + 'lee_background.cor'
lee_test_file = test_data_dir + os.sep + 'lee.cor'
"""
Explanation: What is it?
Doc2Vec is an NLP tool for representing documents as a vector and is a generalization of the Word2Vec method. This tutorial will serve as an introduction to Doc2Vec and present ways to train and assess a Doc2Vec model.
Resources
Word2Vec Paper
Doc2Vec Paper
Dr. Michael D. Lee's Website
Lee Corpus
IMDB Doc2Vec Tutorial
Getting Started
To get going, we'll need to have a set of documents to train our doc2vec model. In theory, a document could be anything from a short 140 character tweet, a single paragraph (i.e., journal article abstract), a news article, or a book. In NLP parlance a collection or set of documents is often referred to as a <b>corpus</b>.
For this tutorial, we'll be training our model using the Lee Background Corpus included in gensim. This corpus contains 314 documents selected from the Australian Broadcasting
Corporation’s news mail service, which provides text e-mails of headline stories and covers a number of broad topics.
And we'll test our model by eye using the much shorter Lee Corpus which contains 50 documents.
End of explanation
"""
def read_corpus(fname, tokens_only=False):
    """Yield one tokenized document per line of *fname*.

    With tokens_only=False (training data), each document is wrapped in a
    TaggedDocument whose tag is its zero-based line number.
    """
    with open(fname, encoding="iso-8859-1") as f:
        for line_no, line in enumerate(f):
            tokens = gensim.utils.simple_preprocess(line)
            if tokens_only:
                yield tokens
            else:
                # Training documents carry their line number as the tag.
                yield gensim.models.doc2vec.TaggedDocument(tokens, [line_no])

train_corpus = list(read_corpus(lee_train_file))
test_corpus = list(read_corpus(lee_test_file, tokens_only=True))
"""
Explanation: Define a Function to Read and Preprocess Text
Below, we define a function to open the train/test file (with latin encoding), read the file line-by-line, pre-process each line using a simple gensim pre-processing tool (i.e., tokenize text into individual words, remove punctuation, set to lowercase, etc), and return a list of words. Note that, for a given file (aka corpus), each continuous line constitutes a single document and the length of each line (i.e., document) can vary. Also, to train the model, we'll need to associate a tag/number with each document of the training corpus. In our case, the tag is simply the zero-based line number.
End of explanation
"""
train_corpus[:2]
"""
Explanation: Let's take a look at the training corpus
End of explanation
"""
print(test_corpus[:2])
"""
Explanation: And the testing corpus looks like this:
End of explanation
"""
model = gensim.models.doc2vec.Doc2Vec(size=50, min_count=2, iter=10)
"""
Explanation: Notice that the testing corpus is just a list of lists and does not contain any tags.
Training the Model
Instantiate a Doc2Vec Object
Now, we'll instantiate a Doc2Vec model with a vector size with 50 words and iterating over the training corpus 10 times. We set the minimum word count to 2 in order to give higher frequency words more weighting. Model accuracy can be improved by increasing the number of iterations but this generally increases the training time.
End of explanation
"""
model.build_vocab(train_corpus)
"""
Explanation: Build a Vocabulary
End of explanation
"""
%time model.train(train_corpus)
"""
Explanation: Essentially, the vocabulary is a dictionary (accessible via model.vocab) of all of the unique words extracted from the training corpus along with the count (e.g., model.vocab['penalty'].count for counts for the word penalty).
Time to Train
This should take no more than 2 minutes
End of explanation
"""
model.infer_vector(['only', 'you', 'can', 'prevent', 'forrest', 'fires'])
"""
Explanation: Inferring a Vector
One important thing to note is that you can now infer a vector for any piece of text without having to re-train the model by passing a list of words to the model.infer_vector function. This vector can then be compared with other vectors via cosine similarity.
End of explanation
"""
# Self-similarity sanity check: re-infer a vector for every training document
# and record where that document ranks among its own nearest neighbours.
ranks = []
second_ranks = []
for doc_id in range(len(train_corpus)):
    inferred_vector = model.infer_vector(train_corpus[doc_id].words)
    sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
    # Position of the document itself in the ranking (0 == most similar).
    rank = [docid for docid, sim in sims].index(doc_id)
    ranks.append(rank)
    # Keep the runner-up for the "second most similar" comparison later.
    second_ranks.append(sims[1])
"""
Explanation: Assessing Model
To assess our new model, we'll first infer new vectors for each document of the training corpus, compare the inferred vectors with the training corpus, and then returning the rank of the document based on self-similarity. Basically, we're pretending as if the training corpus is some new unseen data and then seeing how they compare with the trained model. The expectation is that we've likely overfit our model (i.e., all of the ranks will be less than 2) and so we should be able to find similar documents very easily. Additionally, we'll keep track of the second ranks for a comparison of less similar documents.
End of explanation
"""
collections.Counter(ranks) #96% accuracy
"""
Explanation: Let's count how each document ranks with respect to the training corpus
End of explanation
"""
# NOTE(review): `doc_id` and `sims` here are whatever the previous cell left
# behind (its last loop iteration), so this shows the ranking for the final
# training document only.
print('Document ({}): «{}»\n'.format(doc_id, ' '.join(train_corpus[doc_id].words)))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
    print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words)))
"""
Explanation: Basically, greater than 95% of the inferred documents are found to be most similar to itself and about 5% of the time it is mistakenly most similar to another document. This is great and not entirely surprising. We can take a look at an example:
End of explanation
"""
# Pick a random document from the training corpus and show its second-closest
# neighbour.
# Fixed off-by-one: random.randint is inclusive on BOTH ends, so
# randint(0, len(train_corpus)) could return len(train_corpus) and raise
# IndexError; randrange's upper bound is exclusive.
doc_id = random.randrange(len(train_corpus))

# Compare and print the most/median/least similar documents from the train corpus
print('Train Document ({}): «{}»\n'.format(doc_id, ' '.join(train_corpus[doc_id].words)))
sim_id = second_ranks[doc_id]
print('Similar Document {}: «{}»\n'.format(sim_id, ' '.join(train_corpus[sim_id[0]].words)))
"""
Explanation: Notice above that the most similar document has a similarity score of ~80% (or higher). However, the similarity score for the second ranked documents should be significantly lower (assuming the documents are in fact different) and the reasoning becomes obvious when we examine the text itself
End of explanation
"""
# Pick a random document from the test corpus and infer a vector from the model.
# Fixed off-by-one: randint's upper bound is inclusive, which could index one
# past the end of test_corpus; randrange excludes the upper bound.
doc_id = random.randrange(len(test_corpus))
inferred_vector = model.infer_vector(test_corpus[doc_id])
sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))

# Compare and print the most/median/least similar documents from the train corpus
print('Test Document ({}): «{}»\n'.format(doc_id, ' '.join(test_corpus[doc_id])))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
    print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words)))
"""
Explanation: Testing the Model
Using the same approach above, we'll infer the vector for a randomly chosen test document, and compare the document to our model by eye.
End of explanation
"""
|
Upward-Spiral-Science/spect-team | Code/Assignment-5/Classification.ipynb | apache-2.0 | import pandas as pd
import numpy as np
# Our data is cleaned by cleaning utility code
df = pd.read_csv('Clean_Data_Adults_1.csv')
# Separate labels and Features
df_labels = df['Depressed']
df_feats = df.drop(['Depressed', 'Unnamed: 0'], axis=1, inplace=False)
X = df_feats.get_values() # features
y = df_labels.get_values() # labels
def clean_negs(X):
    """Shift any column of X containing negative values so its minimum becomes 0.

    The race_id columns may hold negative codes; subtracting each such column's
    minimum (without loss of generality) makes all entries non-negative, as
    classifiers like MultinomialNB require.

    Args:
        X: 2-D numeric numpy array; modified in place.

    Returns:
        The same array X, with every previously-negative column shifted.
    """
    # Indices of columns that contain at least one negative value.
    neg_col_inds = np.unique(np.where(X < 0)[1])
    for neg_i in neg_col_inds:
        # Vectorized in-place shift (the original built a Python list per column).
        X[:, neg_i] -= X[:, neg_i].min()
    return X
# Preprocess training features
X = clean_negs(X)
"""
Explanation: Depression Identification
End of explanation
"""
'''
Data Preparation
'''
from sklearn.cross_validation import train_test_split
# Split the simulated data into training set and test set
# Randomly sample 20% data as the test set
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=42)
print 'Training set size is', train_X.shape
print 'Testing set size is', test_X.shape
max_n = len(train_X)
# Train the given classifier on the labelled training set.
def train_clf(clf, train_feats, train_labels):
    """Fit *clf* (supervised) on the training features and labels."""
    clf.fit(train_feats, train_labels)
# Test the given classifier anc calculate accuracy
def test_clf(clf, test_feats, test_labels):
# Predict using test set
predicted = clf.predict(test_feats)
# Compute accuracy
acc = np.mean(predicted == test_labels)
return predicted, acc
"""
Explanation: 1. State assumptions about your data
X are i.i.d. random variables.
y ~ Bern(p)
2. Formally define classification problem
Feature matrix X: R<sup>n x m</sup>
Each sample X<sub>i</sub> ∈ R<sup>m</sup>, i ∈ [1, n]
Label y<sub>i</sub> ∈ {0, 1}, i ∈ [1, n]
g(X) → y
G := { g: R → {0, 1} }
Goal: g<sup>*</sup> = argmin<sub>g ∈ G</sub> E[L(g(X), y)], where L denotes loss function.
The loss function L differs for different classifiers and is speficied in the classfication context below:
Multinomial Naive Bayes: negative joint likelihood
L = -log p(X, y)
Logistic Regression: logistic loss (cross-entropy loss)
L = -log P(y|g(X)) = -(y · log(g(X)) + (1 - y) · log(1 - g(X))
K Nearest Neighbors
L = ∑<sub>i</sub> D(X<sub>i</sub>|y<sub>i</sub>=1, X|y=1) + ∑<sub>i</sub> D(X<sub>i</sub>|y<sub>i</sub>=0, X|y=0)
D(a, b) = (a - b)<sup>2</sup>
Support Vector Machine: squared hinge loss
L = (max{0, 1 − y · g(x)})<sup>2</sup>
Random Forest
L = ∑<sub>i</sub> (g(X<sub>i</sub>) - y<sub>i</sub>)<sup>2</sup>
3. Provide algorithm for solving problem (including choosing hyperparameters as appropriate)
Multinomial Naive Bayes
alpha: default =1.0 (Laplace smoothing parameter)
fit_prior: default = True (learn class prior probabilities)
Logistic Regression
penalty = 'l1' (l1 norm for penalization)
K Nearest Neighbors
n_neighbors = 10
Support Vector Machine (Linear Kernel)
C: default = 1.0 (penalty parameter of the error term)
Random Forest
n_estimators = 60 (number of trees)
criterion: default = 'gini'
4. Sample data from a simulation setting inspired by your data
End of explanation
"""
# Accuracy of a model after training on only the first n samples.
def compute_acc(clf, n):
    """Train *clf* on the first *n* training rows and return its test-set accuracy."""
    train_clf(clf, train_X[:n], train_y[:n])
    _, acc = test_clf(clf, test_X, test_y)
    return acc
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier

# Learning curves: accuracy of each classifier as the training-set size grows
# in steps of 100 samples (a fresh model is fit at every size; Python 2 xrange).
[acc_NB, acc_LG, acc_KNN, acc_SVM, acc_RF] = [[] for i in xrange(5)]
for n in xrange(100, max_n, 100):
    # Multinomial Naive Bayes
    multiNB = MultinomialNB()
    acc_NB.append(compute_acc(multiNB, n))
    # Logistic Regression
    lg = LogisticRegression(penalty='l1')
    acc_LG.append(compute_acc(lg, n))
    # K Nearest Neighbors
    knn = KNeighborsClassifier(n_neighbors=10)
    acc_KNN.append(compute_acc(knn, n))
    # Support Vector Machine
    svc = LinearSVC()
    acc_SVM.append(compute_acc(svc, n))
    # Random Forest
    rf = RandomForestClassifier(n_estimators=60)
    acc_RF.append(compute_acc(rf, n))
"""
Explanation: 5. Compute accuracy
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
sizes = range(100, max_n, 100)
fig = plt.figure(1)
fig.set_size_inches(9, 6.5)
plt.plot(sizes, acc_NB, label='Multinomial NB')
plt.plot(sizes, acc_LG, label='Logistic Regression')
plt.plot(sizes, acc_KNN, label='K Nearest Neighbors')
plt.plot(sizes, acc_SVM, label='Support Vector Machine')
plt.plot(sizes, acc_RF, label='Random Forest')
plt.legend(loc='best')
plt.xlabel('sample size')
plt.ylabel('Simulation Accuracy')
"""
Explanation: 6. Plot accuracy vs. sample size in simulation
End of explanation
"""
'''
Train models with all the training data
Evaluate using the test data
'''
# Multinomial Naive Bayes
multiNB = MultinomialNB()
train_clf(multiNB, train_X, train_y)
predict_y, acc_nb = test_clf(multiNB, test_X, test_y)
# Logistic Regression
lg = LogisticRegression(penalty='l1')
train_clf(lg, train_X, train_y)
predict_y, acc_lg = test_clf(lg, test_X, test_y)
# K Nearest Neighbors
knn = KNeighborsClassifier(n_neighbors=10)
train_clf(knn, train_X, train_y)
predict_y, acc_knn = test_clf(knn, test_X, test_y)
# Support Vector Machine
svc = LinearSVC()
train_clf(svc, train_X, train_y)
predict_y, acc_svc = test_clf(svc, test_X, test_y)
# Random Forest
rf = RandomForestClassifier(n_estimators=60)
train_clf(rf, train_X, train_y)
predict_y, acc_rf = test_clf(rf, test_X, test_y)
print 'Multinomial Naive Bayes accuracy is', acc_nb
print 'Logistic Regression accuracy is', acc_lg
print 'K Nearest Neighbors accuracy is', acc_knn
print 'Support Vector Machine (Linear Kernel) accuracy is', acc_svc
print 'Random Forest accuracy is', acc_rf
# Visualize classifier performance
x = range(5)
y = [acc_nb, acc_lg, acc_knn, acc_svc, acc_rf]
clf_names = ['Multinomial Naive Bayes', 'Logistic Regression', \
'K Nearest Neighbors', 'Support Vector Machine', 'Random Forest']
width = 0.6/1.2
plt.bar(x, y, width)
plt.title('Classifier Performance')
plt.xticks(x, clf_names, rotation=25)
plt.ylabel('Accuracy')
"""
Explanation: 7. Apply method directly on real data
End of explanation
"""
|
materialsvirtuallab/nano106 | lectures/lecture_4_point_group_symmetry/Symmetry Computations on m-3m (O_h) Point Group.ipynb | bsd-3-clause | import numpy as np
from sympy import symbols, Mod
from symmetry.groups import PointGroup
#Create the point group.
oh = PointGroup("m-3m")
print "The generators for this point group are:"
for m in oh.generators:
print m
print "The order of the point group is %d." % len(oh.symmetry_ops)
"""
Explanation: NANO106 - Symmetry Computations on $m\overline{3}m$ ($O_h$) Point Group
by Shyue Ping Ong
This notebook demonstrates the computation of orbits in the $m\overline{3}m$ ($O_h$) point group (more complex than the simple mmm example). It is part of course material for UCSD's NANO106 - Crystallography of Materials.
Initializing the $m\overline{3}m$ point group.
Let's start by importing the numpy, sympy and other packages we need. Instead of going through all the steps one by one, we will use the symmetry.point_groups written by Prof Ong, which basically consolidates the information for all point groups in a single module.
End of explanation
"""
x, y, z = symbols("x y z")
def pt_2_str(pt):
    """Format a point as a string, unwrapping any sympy Mod term to its base expression."""
    unwrapped = []
    for coord in pt:
        unwrapped.append(coord.args[0] if isinstance(coord, Mod) else coord)
    return str(unwrapped)
"""
Explanation: Computing Orbits
Using sympy, we specify the symbolic symbols x, y, z to represent position coordinates. We also define a function to generate the orbit given a set of symmetry operations and a point p.
End of explanation
"""
p = np.array([x, y, z])
orb = oh.get_orbit(p, tol=0)
print "For the general position %s on the two-fold axis, the orbit comprise %d points:" % (str(p), len(orb))
for o in orb:
print pt_2_str(o),
"""
Explanation: Orbit for General Position
End of explanation
"""
p = np.array([0, 0, z])
orb = oh.get_orbit(p, tol=0)
print "For the special position %s on the two-fold axis, the orbit comprise %d points:" % (str(p), len(orb))
for o in orb:
print pt_2_str(o),
"""
Explanation: Orbit for Special Position on four-fold rotation axis
End of explanation
"""
p = np.array([x, x, x])
orb = oh.get_orbit(p, tol=0)
print "For the special position %s on the two-fold axis, the orbit comprise %d points:" % (str(p), len(orb))
for o in orb:
print pt_2_str(o),
"""
Explanation: The orbit is similar for the other two-fold axes on the a and b axes are similar.
Orbit for Special Position on three-fold rotation axis
The three-fold rotation axis are given by (x, x, x)
End of explanation
"""
# (x, x, 0) lies on a two-fold rotation axis of m-3m.
p = np.array([x, x, 0])
orb = oh.get_orbit(p, tol=0)
print "For the special position %s on the two-fold axis, the orbit comprise %d points:" % (str(p), len(orb))
for o in orb:
    print pt_2_str(o),
"""
Explanation: Orbit for Special Position on two-fold rotation axis
The two-fold rotation axis are given by (x, x, 0).
End of explanation
"""
p = np.array([x, y, 0])
orb = oh.get_orbit(p, tol=0)
print "For the special position %s on the two-fold axis, the orbit comprise %d points:" % (str(p), len(orb))
for o in orb:
print pt_2_str(o),
"""
Explanation: Orbit for Special Position on mirror planes
Positions on the mirror on the a-b plane have coordinates (x, y, 0).
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.