id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
18,117 | import json
import os
from dataclasses import dataclass
import numpy as np
import pyarrow as pa
import datasets
from utils import get_duration
# Benchmark sizing constants: total rows and rows per Arrow chunk.
SPEED_TEST_N_EXAMPLES = 100_000_000_000
SPEED_TEST_CHUNK_SIZE = 10_000
# NOTE(review): RESULTS_BASEPATH and RESULTS_FILENAME are not defined in this
# snippet -- presumably injected by the benchmark harness; confirm before running.
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
def generate_100B_dataset(num_examples: int, chunk_size: int) -> datasets.Dataset:
    """Build an in-memory Dataset of `num_examples` zero-valued rows.

    A single Arrow chunk of `chunk_size` rows is created once and repeated,
    so the resulting giant table shares one underlying buffer.
    """
    chunk = pa.Table.from_pydict({"col": [0] * chunk_size})
    full_table = pa.concat_tables([chunk] * (num_examples // chunk_size))
    return datasets.Dataset(full_table, fingerprint="table_100B")
def get_first_row(dataset: datasets.Dataset):
    """Benchmark target: fetch the first example of `dataset`."""
    dataset[0]
def get_last_row(dataset: datasets.Dataset):
    """Benchmark target: fetch the last example of `dataset`."""
    dataset[-1]
def get_batch_of_1024_rows(dataset: datasets.Dataset):
    """Benchmark target: fetch 1024 contiguous rows from the middle of `dataset`."""
    mid = len(dataset) // 2
    dataset[range(mid, mid + 1024)]
def get_batch_of_1024_random_rows(dataset: datasets.Dataset):
    # Fetch 1024 randomly positioned rows in a single indexing call.
    # NOTE(review): `RandIter` is not imported or defined in this snippet --
    # presumably a seeded random-index iterable from the benchmark utils;
    # confirm the import before running.
    _ = dataset[RandIter(0, len(dataset), 1024, seed=42)]
def benchmark_table_100B():
    """Time several access patterns on a huge dataset and dump results as JSON."""
    benchmarks = (
        get_first_row,
        get_last_row,
        get_batch_of_1024_rows,
        get_batch_of_1024_random_rows,
    )
    timings = {"num examples": SPEED_TEST_N_EXAMPLES}
    print("generating dataset")
    dataset = generate_100B_dataset(
        num_examples=SPEED_TEST_N_EXAMPLES, chunk_size=SPEED_TEST_CHUNK_SIZE
    )
    print("Functions")
    for benchmark in benchmarks:
        print(benchmark.__name__)
        timings[benchmark.__name__] = benchmark(dataset)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(timings).encode("utf-8"))
18,118 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark sizing constants: full-sweep and reduced ("small") example counts.
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
# NOTE(review): RESULTS_BASEPATH and RESULTS_FILENAME are not defined in this
# snippet -- presumably injected by the benchmark harness; confirm before running.
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
def read(dataset: datasets.Dataset, length):
    """Access the first `length` examples one row at a time."""
    for row_idx in range(length):
        dataset[row_idx]
def read_batch(dataset: datasets.Dataset, length, batch_size):
    """Access the first `length` examples in contiguous slices of `batch_size`.

    Fix: the original looped over ``len(dataset)`` and silently ignored the
    `length` parameter, unlike its sibling ``read_formatted_batch`` which
    honors it. Existing callers always pass ``length == len(dataset)``, so
    their results are unchanged.
    """
    for start in range(0, length, batch_size):
        dataset[start : start + batch_size]
def read_formatted(dataset: datasets.Dataset, length, type):
    """Row-by-row access with the dataset output format temporarily set to `type`."""
    with dataset.formatted_as(type=type):
        for row_idx in range(length):
            dataset[row_idx]
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    """Sliced access over the first `length` examples with output format `type`."""
    with dataset.formatted_as(type=type):
        for start in range(0, length, batch_size):
            dataset[start : start + batch_size]
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write `num_examples` dummy records to `dataset_path` and load them back.

    NOTE(review): this local definition shadows the `generate_example_dataset`
    imported from `utils` above, and relies on `generate_examples` /
    `ArrowWriter` that are not defined in this snippet -- confirm.
    """
    dummy_records = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for _key, record in dummy_records:
            writer.write(features.encode_example(record))
        num_final_examples, num_bytes = writer.finalize()
    # Sanity-check that everything made it to disk.
    if num_final_examples != num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    return datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
def benchmark_iterating():
    """Benchmark row/batch/formatted access patterns and dump timings to JSON.

    Runs each (function, kwargs) pair on a freshly generated dataset, then a
    reduced set on a shuffled copy, and writes the timing dict (keyed by
    function name plus kwarg values) to ``RESULTS_FILE_PATH``.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    # (function, kwargs) pairs to time on the contiguous dataset.
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # A reduced set re-run after shuffling (shuffling changes access locality).
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        # Fix: the original print was missing the closing parenthesis.
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
18,119 | import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render benchmark results from a JSON file as a collapsible Markdown table.

    Each top-level key of the JSON is one benchmark; each metric becomes a
    column whose cell holds "new / old (diff)" values formatted to 6 decimals.
    """
    with open(input_json_file, encoding="utf-8") as fp:
        results = json.load(fp)

    md_lines = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        metrics = results[benchmark_name]
        md_lines.append(f"### Benchmark: {benchmark_name.split('/')[-1]}")

        header_row = "| metric |"
        separator_row = "|--------|"
        value_row = "| new / old (diff) |"
        for metric_name in sorted(metrics):
            vals = metrics[metric_name]
            new_val = vals["new"]
            old_val = vals.get("old", None)
            dif_val = vals.get("diff", None)

            # Non-numeric entries render as the literal string "None".
            cell = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                cell += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                cell += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            header_row += " " + metric_name + " |"
            separator_row += "---|"
            value_row += cell + " |"
        md_lines += [header_row, separator_row, value_row, " "]

    md_lines.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(md_lines))
18,120 | import argparse
import re
import packaging.version
def global_version_update(version):
    """Propagate `version` into every file tracked in REPLACE_FILES."""
    # NOTE(review): relies on REPLACE_FILES / update_version_in_file defined
    # elsewhere in the release script.
    for pattern_key, file_path in REPLACE_FILES.items():
        update_version_in_file(file_path, version, pattern_key)
def get_version():
    """Parse the current package version out of the __init__ file."""
    with open(REPLACE_FILES["init"], "r") as f:
        init_source = f.read()
    version_str = REPLACE_PATTERNS["init"][0].search(init_source).groups()[0]
    return packaging.version.parse(version_str)
The provided code snippet includes necessary dependencies for implementing the `pre_release_work` function. Write a Python function `def pre_release_work(patch=False)` to solve the following problem:
Do all the necessary pre-release steps.
Here is the function:
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # Default target version: the base version when on a dev release,
    # otherwise bump the patch or minor component.
    current = get_version()
    if patch and current.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if current.is_devrelease:
        default_version = current.base_version
    elif patch:
        default_version = f"{current.major}.{current.minor}.{current.micro + 1}"
    else:
        default_version = f"{current.major}.{current.minor + 1}.0"
    # Confirm interactively; an empty answer accepts the computed default.
    version = input(f"Which version are you releasing? [{default_version}]") or default_version
    print(f"Updating version to {version}.")
    global_version_update(version)
18,121 | import argparse
import re
import packaging.version
def global_version_update(version):
    """Write `version` into each file listed in REPLACE_FILES."""
    # NOTE(review): REPLACE_FILES and update_version_in_file come from the
    # surrounding release script.
    for key, target in REPLACE_FILES.items():
        update_version_in_file(target, version, key)
def get_version():
    """Return the package version currently declared in the __init__ file."""
    with open(REPLACE_FILES["init"], "r") as fh:
        contents = fh.read()
    match = REPLACE_PATTERNS["init"][0].search(contents)
    return packaging.version.parse(match.groups()[0])
The provided code snippet includes necessary dependencies for implementing the `post_release_work` function. Write a Python function `def post_release_work()` to solve the following problem:
Do all the necessary post-release steps.
Here is the function:
def post_release_work():
    """Do all the necessary post-release steps."""
    # Fix: corrected the docstring typo ("necesarry").
    # Next dev version: bump the minor and append ".dev0".
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right; empty input accepts the default.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
18,122 | import os
def amiiDump(f):
    """Parse an amiibo .bin dump into Flipper Zero page strings.

    Reads `f` four bytes (one tag page) at a time and returns a tuple of
    (total-pages summary, list of "Page N: XX XX XX XX" lines, the UID as a
    spaced upper-case hex string). On an unreadable path, prints an error
    and returns None (matching the original behavior).

    Fixes: the file handle is now closed on all paths via ``with`` (the
    original leaked it if a read raised), and the confusing always-true
    ``while amiibin:`` loop is spelled as ``while True``.
    """
    pagelist = []
    uidlist = []
    try:
        with open(f, "rb") as amiibin:
            pagenumber = 0
            while True:
                # Read the bin 4 bytes (one page) at a time.
                chunk = amiibin.read(4)
                if not chunk:
                    break
                # Convert binary to spaced hex pairs (Flipper Zero formatting).
                dirtypage = chunk.hex()
                cleanpage = ' '.join(dirtypage[i:i + 2] for i in range(0, len(dirtypage), 2))
                completepage = "Page {0}: {1}".format(pagenumber, cleanpage.upper())
                # The UID lives in the first 8 bytes (pages 0 and 1).
                if pagenumber <= 1:
                    uidlist.append(cleanpage.upper())
                pagelist.append(completepage)
                pagenumber += 1
        totalpages = "Pages total: {}".format(pagenumber)
        # UID is 7 bytes; drop the last 3 characters (" XX") to match.
        cleanuid = ' '.join(uidlist)[:-3]
        return (totalpages, pagelist, cleanuid)
    except IOError:
        print("Can't open file.")
18,123 | import os
# Fixed header/body templates for the Flipper Zero NFC file format.
template1 = """Filetype: Flipper NFC device
Version: 2
# Nfc device type can be UID, Mifare Ultralight, Bank card
Device type: NTAG215
# UID, ATQA and SAK are common for all formats"""
template2 = """ATQA: 44 00
SAK: 00
# Mifare Ultralight specific data
Signature: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Mifare version: 00 04 04 02 01 00 11 03
Counter 0: 0
Tearing 0: 00
Counter 1: 0
Tearing 1: 00
Counter 2: 0
Tearing 2: 00"""

def amiiCombine(totalpages, pagelist, cleanuid, f):
    """Assemble a Flipper Zero .nfc file next to the source .bin.

    The output path swaps the 3-character extension of `f` for "nfc".
    Content is appended ("a+"), so re-running on the same file adds a
    second copy of the payload.
    """
    newf = f[:-3] + "nfc"
    lines = [template1, "UID: {}".format(cleanuid), template2, totalpages]
    lines.extend(pagelist)
    with open(newf, "a+") as nfcfile:
        nfcfile.write("\n".join(lines) + "\n")
18,134 | import os
import sys
import inspect
sys.path.insert(0, os.path.abspath(".."))
gh_url = "https://github.com/ddbourgin/numpy-ml"

def linkcode_resolve(domain, info):
    """Sphinx ``linkcode`` hook: map a documented Python object to a GitHub URL.

    Returns a ``gh_url``-rooted link with a ``#Lstart-Lend`` fragment, or
    None when the object cannot be resolved to source lines.

    Fixes: the original computed ``filename`` but returned a hard-coded
    path; the bare ``except`` is narrowed; attribute lookup and
    ``getsourcelines`` (which can raise for builtins/C objects) are guarded.
    """
    if domain != "py":
        return None

    module = info.get("module", None)
    fullname = info.get("fullname", None)
    if not module or not fullname:
        return None

    obj = sys.modules.get(module, None)
    if obj is None:
        return None

    try:
        for part in fullname.split("."):
            obj = getattr(obj, part)
        if isinstance(obj, property):
            obj = obj.fget
        file = inspect.getsourcefile(obj)
        source, line_start = inspect.getsourcelines(obj)
    except (AttributeError, TypeError, OSError):
        # Missing attribute, builtin/C-implemented object, or no source file.
        return None
    if file is None:
        return None

    file = os.path.relpath(file, start=os.path.abspath(".."))
    line_end = line_start + len(source) - 1
    filename = f"{file}#L{line_start}-L{line_end}"
    return f"{gh_url}/blob/master/{filename}"
18,135 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `_sigmoid` function. Write a Python function `def _sigmoid(x)` to solve the following problem:
The logistic sigmoid function
Here is the function:
def _sigmoid(x):
"""The logistic sigmoid function"""
return 1 / (1 + np.exp(-x)) | The logistic sigmoid function |
18,136 | import numpy as np
from .dt import DecisionTree
from .losses import MSELoss, CrossEntropyLoss
def to_one_hot(labels, n_classes=None):
    """Convert a 1-D integer label array into a one-hot matrix.

    The number of columns defaults to ``max(labels) + 1`` when `n_classes`
    is not given. Raises ValueError for inputs with more than one dimension.
    """
    if labels.ndim > 1:
        raise ValueError("labels must have dimension 1, but got {}".format(labels.ndim))
    n_rows = labels.size
    n_cols = n_classes if n_classes is not None else np.max(labels) + 1
    one_hot = np.zeros((n_rows, n_cols))
    one_hot[np.arange(n_rows), labels] = 1.0
    return one_hot
18,137 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `mse` function. Write a Python function `def mse(y)` to solve the following problem:
Mean squared error for decision tree (ie., mean) predictions
Here is the function:
def mse(y):
    """
    Mean squared error of the constant (mean) prediction for `y`, i.e. the
    population variance -- the split criterion for regression trees.
    """
    center = np.mean(y)
    return np.mean((y - center) ** 2)
18,138 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `entropy` function. Write a Python function `def entropy(y)` to solve the following problem:
Entropy of a label sequence
Here is the function:
def entropy(y):
    """
    Shannon entropy (in bits) of an integer label sequence.
    """
    counts = np.bincount(y)
    probs = counts / np.sum(counts)
    nonzero = probs[probs > 0]
    return -np.sum(nonzero * np.log2(nonzero))
18,139 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `gini` function. Write a Python function `def gini(y)` to solve the following problem:
Gini impurity (local entropy) of a label sequence
Here is the function:
def gini(y):
    """
    Gini impurity of an integer label sequence: 1 minus the sum of the
    squared class frequencies.
    """
    counts = np.bincount(y)
    total = np.sum(counts)
    return 1 - np.sum((counts / total) ** 2)
18,140 | import numpy as np
from .dt import DecisionTree
def bootstrap_sample(X, Y):
    """Draw a bootstrap resample (rows sampled with replacement) of X and Y."""
    n_rows, _n_feats = X.shape
    sample_idxs = np.random.choice(n_rows, n_rows, replace=True)
    return X[sample_idxs], Y[sample_idxs]
18,141 | import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from hmmlearn.hmm import MultinomialHMM as MHMM
from numpy_ml.hmm import MultinomialHMM
def generate_training_data(params, n_steps=500, n_examples=15):
    """Sample `n_examples` observation sequences of length `n_steps` from an HMM
    parameterized by `params` (keys "A", "B", "pi", "latent_states", "obs_types")."""
    hmm = MultinomialHMM(A=params["A"], B=params["B"], pi=params["pi"])
    all_obs = []
    for _ in range(n_examples):
        # Draw one (latent, observed) trajectory from the model.
        latent, observed = hmm.generate(
            n_steps, params["latent_states"], params["obs_types"]
        )
        assert len(latent) == len(observed) == n_steps
        all_obs.append(observed)
    return np.array(all_obs)
def default_hmm():
    """Return parameters for a small two-state, four-symbol reference HMM."""
    obs_types = [0, 1, 2, 3]
    latent_states = ["H", "C"]

    # A very simple HMM with T=3 observations.
    O = np.array([1, 3, 1]).reshape(1, -1)
    A = np.array([[0.9, 0.1], [0.5, 0.5]])  # transition matrix
    B = np.array([[0.2, 0.7, 0.09, 0.01], [0.1, 0.0, 0.8, 0.1]])  # emission matrix
    pi = np.array([0.75, 0.25])  # prior over latent states

    return {
        "latent_states": latent_states,
        "obs_types": obs_types,
        "V": len(obs_types),
        "N": len(latent_states),
        "O": O,
        "A": A,
        "B": B,
        "pi": pi,
    }
def plot_matrices(params, best, best_theirs):
    """Heatmap the true vs. estimated (mine / hmmlearn) HMM parameter matrices
    in a 3x3 grid, and save the figure to ``img/plot.png``."""
    cmap = "copper"
    ll_mine, best = best
    ll_theirs, best_theirs = best_theirs

    fig, axes = plt.subplots(3, 3)
    axes = {
        "A": [axes[0, 0], axes[0, 1], axes[0, 2]],
        "B": [axes[1, 0], axes[1, 1], axes[1, 2]],
        "pi": [axes[2, 0], axes[2, 1], axes[2, 2]],
    }
    # Identical styling shared by every heatmap in the grid.
    heat_kwargs = dict(
        vmin=0.0,
        vmax=1.0,
        fmt=".2f",
        cmap=cmap,
        annot=True,
        cbar=False,
        xticklabels=[],
        yticklabels=[],
        linewidths=0.25,
    )
    for key, title in [("A", "Transition"), ("B", "Emission"), ("pi", "Prior")]:
        true_ax, est_ax, est_theirs_ax = axes[key]
        true, est, est_theirs = params[key], best[key], best_theirs[key]
        if key == "pi":
            # Render the prior vector as a single column.
            true = true.reshape(-1, 1)
            est = est.reshape(-1, 1)
            est_theirs = est_theirs.reshape(-1, 1)
        true_ax = sns.heatmap(true, ax=true_ax, **heat_kwargs)
        est_ax = sns.heatmap(est, ax=est_ax, **heat_kwargs)
        est_theirs_ax = sns.heatmap(est_theirs, ax=est_theirs_ax, **heat_kwargs)
        true_ax.set_title("{} (True)".format(title))
        est_ax.set_title("{} (Mine)".format(title))
        est_theirs_ax.set_title("{} (hmmlearn)".format(title))

    fig.suptitle("LL (mine): {:.2f}, LL (hmmlearn): {:.2f}".format(ll_mine, ll_theirs))
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.savefig("img/plot.png", dpi=300)
    plt.close()
def test_HMM():
    """Fit this repo's MultinomialHMM and hmmlearn's on the same sampled data,
    keep the best of `n_runs` restarts for each, and plot the comparison."""
    np.random.seed(12345)
    np.set_printoptions(precision=5, suppress=True)

    P = default_hmm()
    ls, obs = P["latent_states"], P["obs_types"]

    # sample a fresh batch of observation sequences
    O = generate_training_data(P, n_steps=30, n_examples=25)

    tol = 1e-5
    n_runs = 5
    best, best_theirs = (-np.inf, []), (-np.inf, [])
    for _ in range(n_runs):
        hmm = MultinomialHMM()
        A_, B_, pi_ = hmm.fit(O, ls, obs, tol=tol, verbose=True)

        theirs = MHMM(
            tol=tol,
            verbose=True,
            n_iter=int(1e9),
            transmat_prior=1,
            startprob_prior=1,
            algorithm="viterbi",
            n_components=len(ls),
        )
        # hmmlearn expects one flat column of observations plus per-sequence lengths
        O_flat = O.reshape(1, -1).flatten().reshape(-1, 1)
        seq_lengths = [O.shape[1]] * O.shape[0]
        theirs = theirs.fit(O_flat, lengths=seq_lengths)

        hmm2 = MultinomialHMM(A=A_, B=B_, pi=pi_)
        like = np.sum([hmm2.log_likelihood(seq) for seq in O])
        like_theirs = theirs.score(O_flat, lengths=seq_lengths)

        if like > best[0]:
            best = (like, {"A": A_, "B": B_, "pi": pi_})
        if like_theirs > best_theirs[0]:
            best_theirs = (
                like_theirs,
                {
                    "A": theirs.transmat_,
                    "B": theirs.emissionprob_,
                    "pi": theirs.startprob_,
                },
            )

    print("Final log likelihood of sequence: {:.5f}".format(best[0]))
    print("Final log likelihood of sequence (theirs): {:.5f}".format(best_theirs[0]))
    plot_matrices(P, best, best_theirs)
18,142 | import gym
from numpy_ml.rl_models.trainer import Trainer
from numpy_ml.rl_models.agents import (
CrossEntropyAgent,
MonteCarloAgent,
TemporalDifferenceAgent,
DynaAgent,
)
class Trainer(object):
    """Facilitates training and evaluation of an RL agent on a gym environment.

    NOTE(review): this snippet references ``time`` and ``np``, which are not
    imported in the visible code -- presumably ``from time import time`` and
    ``import numpy as np`` at the top of the full file; confirm.
    """

    def __init__(self, agent, env):
        """
        An object to facilitate agent training and evaluation.
        Parameters
        ----------
        agent : :class:`AgentBase` instance
            The agent to train.
        env : ``gym.wrappers`` or ``gym.envs`` instance
            The environment to run the agent on.
        """
        self.env = env
        self.agent = agent
        # Per-episode statistics, appended to on every training episode.
        self.rewards = {"total": [], "smooth_total": [], "n_steps": [], "duration": []}

    def _train_episode(self, max_steps, render_every=None):
        """Run one training episode; return (reward, duration, n_steps).

        NOTE(review): `render_every` is accepted but never used here.
        """
        t0 = time()
        if "train_episode" in dir(self.agent):
            # online training updates over the course of the episode
            reward, n_steps = self.agent.train_episode(max_steps)
        else:
            # offline training updates upon completion of the episode
            reward, n_steps = self.agent.run_episode(max_steps)
            self.agent.update()
        duration = time() - t0
        return reward, duration, n_steps

    def train(
        self,
        n_episodes,
        max_steps,
        seed=None,
        plot=True,
        verbose=True,
        render_every=None,
        smooth_factor=0.05,
    ):
        """
        Train an agent on an OpenAI gym environment, logging training
        statistics along the way.
        Parameters
        ----------
        n_episodes : int
            The number of episodes to train the agent across.
        max_steps : int
            The maximum number of steps the agent can take on each episode.
        seed : int or None
            A seed for the random number generator. Default is None.
        plot : bool
            Whether to generate a plot of the cumulative reward as a function
            of training episode. Default is True.
        verbose : bool
            Whether to print intermediate run statistics to stdout during
            training. Default is True.
        render_every : int or None
            Evaluate and report a greedy policy every `render_every` episodes;
            None disables these evaluations. Default is None.
        smooth_factor : float in [0, 1]
            The amount to smooth the cumulative reward across episodes. Larger
            values correspond to less smoothing.
        """
        if seed:
            np.random.seed(seed)
            self.env.seed(seed=seed)
        t0 = time()
        render_every = np.inf if render_every is None else render_every
        sf = smooth_factor
        for ep in range(n_episodes):
            tot_rwd, duration, n_steps = self._train_episode(max_steps)
            # Exponential moving average of the per-episode reward.
            smooth_tot = tot_rwd if ep == 0 else (1 - sf) * smooth_tot + sf * tot_rwd
            if verbose:
                fstr = "[Ep. {:2}] {:<6.2f} Steps | Total Reward: {:<7.2f}"
                fstr += " | Smoothed Total: {:<7.2f} | Duration: {:<6.2f}s"
                print(fstr.format(ep + 1, n_steps, tot_rwd, smooth_tot, duration))
            if (ep + 1) % render_every == 0:
                fstr = "\tGreedy policy total reward: {:.2f}, n_steps: {:.2f}"
                total, n_steps = self.agent.greedy_policy(max_steps)
                print(fstr.format(total, n_steps))
            self.rewards["total"].append(tot_rwd)
            self.rewards["n_steps"].append(n_steps)
            self.rewards["duration"].append(duration)
            self.rewards["smooth_total"].append(smooth_tot)
        train_time = (time() - t0) / 60
        fstr = "Training took {:.2f} mins [{:.2f}s/episode]"
        print(fstr.format(train_time, np.mean(self.rewards["duration"])))
        # Final evaluation with a greedy target policy (no rendering).
        rwd_greedy, n_steps = self.agent.greedy_policy(max_steps, render=False)
        fstr = "Final greedy reward: {:.2f} | n_steps: {:.2f}"
        print(fstr.format(rwd_greedy, n_steps))
        if plot:
            self.plot_rewards(rwd_greedy)

    def plot_rewards(self, rwd_greedy):
        """
        Plot the cumulative reward per episode as a function of episode number.
        Notes
        -----
        Saves plot to the file ``./img/<agent>-<env>.png``
        Parameters
        ----------
        rwd_greedy : float
            The cumulative reward earned with a final execution of a greedy
            target policy.
        """
        try:
            import matplotlib.pyplot as plt
            import seaborn as sns
            # https://seaborn.pydata.org/generated/seaborn.set_context.html
            # https://seaborn.pydata.org/generated/seaborn.set_style.html
            sns.set_style("white")
            sns.set_context("notebook", font_scale=1)
        except:
            fstr = "Error importing `matplotlib` and `seaborn` -- plotting functionality is disabled"
            raise ImportError(fstr)
        R = self.rewards
        fig, ax = plt.subplots()
        x = np.arange(len(R["total"]))
        y = R["smooth_total"]
        y_raw = R["total"]
        ax.plot(x, y, label="smoothed")
        ax.plot(x, y_raw, alpha=0.5, label="raw")
        # Horizontal reference line at the final greedy-policy reward.
        ax.axhline(y=rwd_greedy, xmin=min(x), xmax=max(x), ls=":", label="final greedy")
        ax.legend()
        sns.despine()
        env = self.agent.env_info["id"]
        agent = self.agent.hyperparameters["agent"]
        ax.set_xlabel("Episode")
        ax.set_ylabel("Cumulative reward")
        ax.set_title("{} on '{}'".format(agent, env))
        plt.savefig("img/{}-{}.png".format(agent, env))
        plt.close("all")
class CrossEntropyAgent(AgentBase):
    # NOTE(review): `AgentBase` is not defined in this snippet -- presumably
    # imported from numpy_ml.rl_models; it supplies `env_info`,
    # `_create_2num_dicts`, `_obs2num`, `_action2num`, and `_num2action`.
    def __init__(self, env, n_samples_per_episode=500, retain_prcnt=0.2):
        r"""
        A cross-entropy method agent.
        Notes
        -----
        The cross-entropy method [1]_ [2]_ agent only operates on ``envs`` with
        discrete action spaces.
        On each episode the agent generates `n_theta_samples` of the parameters
        (:math:`\theta`) for its behavior policy. The `i`'th sample at
        timestep `t` is:
        .. math::
            \theta_i &= \{\mathbf{W}_i^{(t)}, \mathbf{b}_i^{(t)} \} \\
            \theta_i &\sim \mathcal{N}(\mu^{(t)}, \Sigma^{(t)})
        Weights (:math:`\mathbf{W}_i`) and bias (:math:`\mathbf{b}_i`) are the
        parameters of the softmax policy:
        .. math::
            \mathbf{z}_i &= \text{obs} \cdot \mathbf{W}_i + \mathbf{b}_i \\
            p(a_i^{(t + 1)}) &= \frac{e^{\mathbf{z}_i}}{\sum_j e^{z_{ij}}} \\
            a^{(t + 1)} &= \arg \max_j p(a_j^{(t+1)})
        At the end of each episode, the agent takes the top `retain_prcnt`
        highest scoring :math:`\theta` samples and combines them to generate
        the mean and variance of the distribution of :math:`\theta` for the
        next episode:
        .. math::
            \mu^{(t+1)} &= \text{avg}(\texttt{best_thetas}^{(t)}) \\
            \Sigma^{(t+1)} &= \text{var}(\texttt{best_thetas}^{(t)})
        References
        ----------
        .. [1] Mannor, S., Rubinstein, R., & Gat, Y. (2003). The cross entropy
           method for fast policy search. In *Proceedings of the 20th Annual
           ICML, 20*.
        .. [2] Rubinstein, R. (1997). optimization of computer simulation
           models with rare events, *European Journal of Operational Research,
           99*, 89–112.
        Parameters
        ----------
        env : :meth:`gym.wrappers` or :meth:`gym.envs` instance
            The environment to run the agent on.
        n_samples_per_episode : int
            The number of theta samples to evaluate on each episode. Default is 500.
        retain_prcnt: float
            The percentage of `n_samples_per_episode` to use when calculating
            the parameter update at the end of the episode. Default is 0.2.
        """
        super().__init__(env)
        self.retain_prcnt = retain_prcnt
        self.n_samples_per_episode = n_samples_per_episode
        self._init_params()
    def _init_params(self):
        # Initialize the parameter distribution, bookkeeping structures, and
        # hyperparameter record for a fresh agent.
        E = self.env_info
        assert not E["continuous_actions"], "Action space must be discrete"
        self._create_2num_dicts()
        # Flattened sizes of the softmax policy's bias and weight blocks; a
        # theta sample concatenates both: theta = [W.ravel(), b].
        b_len = np.prod(E["n_actions_per_dim"])
        W_len = b_len * np.prod(E["obs_dim"])
        theta_dim = b_len + W_len
        # init mean and variance for mv gaussian with dimensions theta_dim
        theta_mean = np.random.rand(theta_dim)
        theta_var = np.ones(theta_dim)
        self.parameters = {"theta_mean": theta_mean, "theta_var": theta_var}
        self.derived_variables = {
            "b_len": b_len,
            "W_len": W_len,
            "W_samples": [],
            "b_samples": [],
            "episode_num": 0,
            "cumulative_rewards": [],
        }
        self.hyperparameters = {
            "agent": "CrossEntropyAgent",
            "retain_prcnt": self.retain_prcnt,
            "n_samples_per_episode": self.n_samples_per_episode,
        }
        self.episode_history = {"rewards": [], "state_actions": []}
    def act(self, obs):
        r"""
        Generate actions according to a softmax policy.
        Notes
        -----
        The softmax policy assumes that the pmf over actions in state :math:`x_t` is
        given by:
        .. math::
            \pi(a | x^{(t)}) = \text{softmax}(
                \text{obs}^{(t)} \cdot \mathbf{W}_i^{(t)} + \mathbf{b}_i^{(t)} )
        where :math:`\mathbf{W}` is a learned weight matrix, `obs` is the observation
        at timestep `t`, and **b** is a learned bias vector.
        Parameters
        ----------
        obs : int or :py:class:`ndarray <numpy.ndarray>`
            An observation from the environment.
        Returns
        -------
        action : int, float, or :py:class:`ndarray <numpy.ndarray>`
            An action sampled from the distribution over actions defined by the
            softmax policy.
        """
        # NOTE(review): P["W"] / P["b"] only exist after `_episode` has set
        # them for the current theta sample -- `act` is not callable on a
        # freshly initialized agent.
        E, P = self.env_info, self.parameters
        W, b = P["W"], P["b"]
        s = self._obs2num[obs]
        s = np.array([s]) if E["obs_dim"] == 1 else s
        # compute softmax (shifted by the max logit for numerical stability)
        Z = s.T @ W + b
        e_Z = np.exp(Z - np.max(Z, axis=-1, keepdims=True))
        action_probs = e_Z / e_Z.sum(axis=-1, keepdims=True)
        # sample action
        a = np.random.multinomial(1, action_probs).argmax()
        return self._num2action[a]
    def run_episode(self, max_steps, render=False):
        """
        Run the agent on a single episode.
        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run an episode
        render : bool
            Whether to render the episode during training
        Returns
        -------
        reward : float
            The total reward on the episode, averaged over the theta samples.
        steps : float
            The total number of steps taken on the episode, averaged over the
            theta samples.
        """
        self._sample_thetas()
        E, D = self.env_info, self.derived_variables
        n_actions = np.prod(E["n_actions_per_dim"])
        W_len, obs_dim = D["W_len"], E["obs_dim"]
        steps, rewards = [], []
        # Evaluate every sampled theta with its own rollout.
        for theta in D["theta_samples"]:
            W = theta[:W_len].reshape(obs_dim, n_actions)
            b = theta[W_len:]
            total_rwd, n_steps = self._episode(W, b, max_steps, render)
            rewards.append(total_rwd)
            steps.append(n_steps)
        # return the average reward and average number of steps across all
        # samples on the current episode
        D["episode_num"] += 1
        D["cumulative_rewards"] = rewards
        return np.mean(D["cumulative_rewards"]), np.mean(steps)
    def _episode(self, W, b, max_steps, render):
        """
        Run the agent for an episode.
        Parameters
        ----------
        W : :py:class:`ndarray <numpy.ndarray>` of shape `(obs_dim, n_actions)`
            The weights for the softmax policy.
        b : :py:class:`ndarray <numpy.ndarray>` of shape `(bias_len, )`
            The bias for the softmax policy.
        max_steps : int
            The maximum number of steps to run the episode.
        render : bool
            Whether to render the episode during training.
        Returns
        -------
        reward : float
            The total reward on the episode.
        steps : float
            The total number of steps taken on the episode.
        """
        rwds, sa = [], []
        H = self.episode_history
        # NOTE(review): n_steps starts at 1 and is incremented before each
        # action, so it reports steps + 1 -- confirm whether intentional.
        total_reward, n_steps = 0.0, 1
        obs = self.env.reset()
        # Install this theta sample's parameters so `act` can use them.
        self.parameters["W"] = W
        self.parameters["b"] = b
        for i in range(max_steps):
            if render:
                self.env.render()
            n_steps += 1
            action = self.act(obs)
            s, a = self._obs2num[obs], self._action2num[action]
            sa.append((s, a))
            obs, reward, done, _ = self.env.step(action)
            rwds.append(reward)
            total_reward += reward
            if done:
                break
        H["rewards"].append(rwds)
        H["state_actions"].append(sa)
        return total_reward, n_steps
    def update(self):
        r"""
        Update :math:`\mu` and :math:`\Sigma` according to the rewards accrued on
        the current episode.
        Returns
        -------
        avg_reward : float
            The average reward earned by the best `retain_prcnt` theta samples
            on the current episode.
        """
        D, P = self.derived_variables, self.parameters
        n_retain = int(self.retain_prcnt * self.n_samples_per_episode)
        # sort the cumulative rewards for each theta sample from greatest to least
        sorted_y_val_idxs = np.argsort(D["cumulative_rewards"])[::-1]
        top_idxs = sorted_y_val_idxs[:n_retain]
        # update theta_mean and theta_var with the best theta value
        P["theta_mean"] = np.mean(D["theta_samples"][top_idxs], axis=0)
        P["theta_var"] = np.var(D["theta_samples"][top_idxs], axis=0)
    def _sample_thetas(self):
        """
        Sample `n_samples_per_episode` thetas from a multivariate Gaussian with
        mean `theta_mean` and covariance `diag(theta_var)`
        """
        P, N = self.parameters, self.n_samples_per_episode
        Mu, Sigma = P["theta_mean"], np.diag(P["theta_var"])
        samples = np.random.multivariate_normal(Mu, Sigma, N)
        self.derived_variables["theta_samples"] = samples
    def greedy_policy(self, max_steps, render=True):
        """
        Execute a greedy policy using the current agent parameters.
        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run the episode.
        render : bool
            Whether to render the episode during execution.
        Returns
        -------
        total_reward : float
            The total reward on the episode.
        n_steps : float
            The total number of steps taken on the episode.
        """
        # NOTE(review): despite the name, this draws ONE random theta from
        # the current parameter distribution rather than using its mean.
        E, D, P = self.env_info, self.derived_variables, self.parameters
        Mu, Sigma = P["theta_mean"], np.diag(P["theta_var"])
        sample = np.random.multivariate_normal(Mu, Sigma, 1)
        W_len, obs_dim = D["W_len"], E["obs_dim"]
        n_actions = np.prod(E["n_actions_per_dim"])
        W = sample[0, :W_len].reshape(obs_dim, n_actions)
        b = sample[0, W_len:]
        total_reward, n_steps = self._episode(W, b, max_steps, render)
        return total_reward, n_steps
def test_cross_entropy_agent():
    """Smoke-train a CrossEntropyAgent on LunarLander-v2 for 50 episodes."""
    seed = 12345
    max_steps = 300
    n_episodes = 50
    retain_prcnt = 0.2
    n_samples_per_episode = 500

    env = gym.make("LunarLander-v2")
    agent = CrossEntropyAgent(env, n_samples_per_episode, retain_prcnt)
    trainer = Trainer(agent, env)
    trainer.train(
        n_episodes, max_steps, seed=seed, plot=True, verbose=True, render_every=None
    )
18,143 | import gym
from numpy_ml.rl_models.trainer import Trainer
from numpy_ml.rl_models.agents import (
CrossEntropyAgent,
MonteCarloAgent,
TemporalDifferenceAgent,
DynaAgent,
)
class Trainer(object):
    """Facilitate agent training and evaluation on a gym environment."""

    def __init__(self, agent, env):
        """
        An object to facilitate agent training and evaluation.

        Parameters
        ----------
        agent : :class:`AgentBase` instance
            The agent to train.
        env : ``gym.wrappers`` or ``gym.envs`` instance
            The environment to run the agent on.
        """
        self.env = env
        self.agent = agent
        # per-episode training statistics, appended to by `train`
        self.rewards = {"total": [], "smooth_total": [], "n_steps": [], "duration": []}

    def _train_episode(self, max_steps, render_every=None):
        """
        Run a single training episode and time it.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run the episode.
        render_every : int or None
            Unused; retained for interface compatibility. Default is None.

        Returns
        -------
        reward : float
            The total reward earned on the episode.
        duration : float
            Wall-clock duration of the episode, in seconds.
        n_steps : int
            The number of steps taken on the episode.
        """
        t0 = time()
        if "train_episode" in dir(self.agent):
            # online training updates over the course of the episode
            reward, n_steps = self.agent.train_episode(max_steps)
        else:
            # offline training updates upon completion of the episode
            reward, n_steps = self.agent.run_episode(max_steps)
            self.agent.update()
        duration = time() - t0
        return reward, duration, n_steps

    def train(
        self,
        n_episodes,
        max_steps,
        seed=None,
        plot=True,
        verbose=True,
        render_every=None,
        smooth_factor=0.05,
    ):
        """
        Train an agent on an OpenAI gym environment, logging training
        statistics along the way.

        Parameters
        ----------
        n_episodes : int
            The number of episodes to train the agent across.
        max_steps : int
            The maximum number of steps the agent can take on each episode.
        seed : int or None
            A seed for the random number generator. Default is None.
        plot : bool
            Whether to generate a plot of the cumulative reward as a function
            of training episode. Default is True.
        verbose : bool
            Whether to print intermediate run statistics to stdout during
            training. Default is True.
        render_every : int or None
            Run (and print the score of) the agent's greedy policy every
            `render_every` episodes. If None, skip these evaluations.
            Default is None.
        smooth_factor : float in [0, 1]
            The amount to smooth the cumulative reward across episodes. Larger
            values correspond to less smoothing.
        """
        # BUG FIX: explicit None check so a seed of 0 is not silently ignored
        if seed is not None:
            np.random.seed(seed)
            self.env.seed(seed=seed)

        t0 = time()
        render_every = np.inf if render_every is None else render_every
        sf = smooth_factor

        for ep in range(n_episodes):
            tot_rwd, duration, n_steps = self._train_episode(max_steps)

            # exponential moving average of the per-episode total reward
            smooth_tot = tot_rwd if ep == 0 else (1 - sf) * smooth_tot + sf * tot_rwd

            if verbose:
                fstr = "[Ep. {:2}] {:<6.2f} Steps | Total Reward: {:<7.2f}"
                fstr += " | Smoothed Total: {:<7.2f} | Duration: {:<6.2f}s"
                print(fstr.format(ep + 1, n_steps, tot_rwd, smooth_tot, duration))

            if (ep + 1) % render_every == 0:
                fstr = "\tGreedy policy total reward: {:.2f}, n_steps: {:.2f}"
                total, n_steps = self.agent.greedy_policy(max_steps)
                print(fstr.format(total, n_steps))

            self.rewards["total"].append(tot_rwd)
            self.rewards["n_steps"].append(n_steps)
            self.rewards["duration"].append(duration)
            self.rewards["smooth_total"].append(smooth_tot)

        train_time = (time() - t0) / 60

        fstr = "Training took {:.2f} mins [{:.2f}s/episode]"
        print(fstr.format(train_time, np.mean(self.rewards["duration"])))

        # final evaluation of the learned greedy policy (no rendering)
        rwd_greedy, n_steps = self.agent.greedy_policy(max_steps, render=False)

        fstr = "Final greedy reward: {:.2f} | n_steps: {:.2f}"
        print(fstr.format(rwd_greedy, n_steps))

        if plot:
            self.plot_rewards(rwd_greedy)

    def plot_rewards(self, rwd_greedy):
        """
        Plot the cumulative reward per episode as a function of episode number.

        Notes
        -----
        Saves plot to the file ``./img/<agent>-<env>.png``

        Parameters
        ----------
        rwd_greedy : float
            The cumulative reward earned with a final execution of a greedy
            target policy.
        """
        try:
            import matplotlib.pyplot as plt
            import seaborn as sns

            # https://seaborn.pydata.org/generated/seaborn.set_context.html
            # https://seaborn.pydata.org/generated/seaborn.set_style.html
            sns.set_style("white")
            sns.set_context("notebook", font_scale=1)
        # BUG FIX: catch only ImportError (a bare `except:` also swallows
        # KeyboardInterrupt / SystemExit) and chain the original cause
        except ImportError as e:
            fstr = "Error importing `matplotlib` and `seaborn` -- plotting functionality is disabled"
            raise ImportError(fstr) from e

        R = self.rewards
        fig, ax = plt.subplots()
        x = np.arange(len(R["total"]))
        y = R["smooth_total"]
        y_raw = R["total"]

        ax.plot(x, y, label="smoothed")
        ax.plot(x, y_raw, alpha=0.5, label="raw")
        ax.axhline(y=rwd_greedy, xmin=min(x), xmax=max(x), ls=":", label="final greedy")
        ax.legend()
        sns.despine()

        env = self.agent.env_info["id"]
        agent = self.agent.hyperparameters["agent"]

        ax.set_xlabel("Episode")
        ax.set_ylabel("Cumulative reward")
        ax.set_title("{} on '{}'".format(agent, env))
        plt.savefig("img/{}-{}.png".format(agent, env))
        plt.close("all")
class MonteCarloAgent(AgentBase):
    """A tabular first-visit / weighted-importance-sampling Monte Carlo agent."""

    def __init__(self, env, off_policy=False, temporal_discount=0.9, epsilon=0.1):
        """
        A Monte-Carlo learning agent trained using either first-visit Monte
        Carlo updates (on-policy) or incremental weighted importance sampling
        (off-policy).

        Parameters
        ----------
        env : :class:`gym.wrappers` or :class:`gym.envs` instance
            The environment to run the agent on.
        off_policy : bool
            Whether to use a behavior policy separate from the target policy
            during training. If False, use the same epsilon-soft policy for
            both behavior and target policies. Default is False.
        temporal_discount : float between [0, 1]
            The discount factor used for downweighting future rewards. Smaller
            values result in greater discounting of future rewards. Default is
            0.9.
        epsilon : float between [0, 1]
            The epsilon value in the epsilon-soft policy. Larger values
            encourage greater exploration during training. Default is 0.1.
        """
        super().__init__(env)

        self.epsilon = epsilon
        self.off_policy = off_policy
        self.temporal_discount = temporal_discount

        self._init_params()

    def _init_params(self):
        """Initialize policies, the Q table, and bookkeeping structures."""
        E = self.env_info
        assert not E["continuous_actions"], "Action space must be discrete"
        assert not E["continuous_observations"], "Observation space must be discrete"

        n_states = np.prod(E["n_obs_per_dim"])
        n_actions = np.prod(E["n_actions_per_dim"])

        self._create_2num_dicts()

        # behavior policy is stochastic, epsilon-soft policy
        self.behavior_policy = self.target_policy = self._epsilon_soft_policy
        if self.off_policy:
            # cumulative importance-sampling weights for each (s, a) pair
            self.parameters["C"] = np.zeros((n_states, n_actions))

            # target policy is deterministic, greedy policy
            self.target_policy = self._greedy

        # initialize Q function
        self.parameters["Q"] = np.random.rand(n_states, n_actions)

        # initialize returns object for each state-action pair
        self.derived_variables = {
            "returns": {(s, a): [] for s in range(n_states) for a in range(n_actions)},
            "episode_num": 0,
        }

        self.hyperparameters = {
            "agent": "MonteCarloAgent",
            "epsilon": self.epsilon,
            "off_policy": self.off_policy,
            "temporal_discount": self.temporal_discount,
        }

        self.episode_history = {"state_actions": [], "rewards": []}

    def _epsilon_soft_policy(self, s, a=None):
        r"""
        Epsilon-soft exploration policy.

        Notes
        -----
        Soft policies are necessary for first-visit Monte Carlo methods, as
        they require continual exploration (i.e., each state-action pair must
        have nonzero probability of occurring). In epsilon-soft policies,
        :math:`\pi(a \mid s) > 0` for all :math:`s \in S` and all :math:`a \in
        A(s)` at the start of training. In particular:

        .. math::

            \pi(a \mid s)  &=  1 - \epsilon + \frac{\epsilon}{|A(s)|}
                &&\text{if } a = a^* \\
            \pi(a \mid s)  &=  \frac{\epsilon}{|A(s)|}
                &&\text{if } a \neq a^*

        where :math:`|A(s)|` is the number of actions available in state `s`
        and :math:`a^* = \arg \max_a Q(s, a)` is the greedy action in state
        `s`.

        Parameters
        ----------
        s : int, float, or tuple
            The state number for the current observation, as returned by
            ``_obs2num[obs]``.
        a : int, float, tuple, or None
            The action number in the current state, as returned by
            ``self._action2num[obs]``. If None, sample an action from the
            action probabilities in state `s`, otherwise, return the
            probability of action `a` under the epsilon-soft policy. Default
            is None.

        Returns
        -------
        action : int, float, or :py:class:`ndarray <numpy.ndarray>`
            If `a` is None, this is an action sampled from the distribution
            over actions defined by the epsilon-soft policy. If `a` is not
            None, this is the probability of `a` under the epsilon-soft policy.
        """
        E, P = self.env_info, self.parameters

        # TODO: this assumes all actions are available in every state
        n_actions = np.prod(E["n_actions_per_dim"])

        a_star = P["Q"][s, :].argmax()
        p_a_star = 1.0 - self.epsilon + (self.epsilon / n_actions)
        p_a = self.epsilon / n_actions

        action_probs = np.ones(n_actions) * p_a
        action_probs[a_star] = p_a_star
        np.testing.assert_allclose(np.sum(action_probs), 1)

        if a is not None:
            return action_probs[a]

        # sample action
        a = np.random.multinomial(1, action_probs).argmax()
        return self._num2action[a]

    def _greedy(self, s, a=None):
        """
        A greedy behavior policy.

        Notes
        -----
        Only used when off-policy is True.

        Parameters
        ----------
        s : int, float, or tuple
            The state number for the current observation, as returned by
            ``self._obs2num[obs]``.
        a : int, float, or tuple
            The action number in the current state, as returned by
            ``self._action2num[obs]``. If None, sample an action from the
            action probabilities in state `s`, otherwise, return the
            probability of action `a` under the greedy policy. Default is
            None.

        Returns
        -------
        action : int, float, or :py:class:`ndarray <numpy.ndarray>`
            If `a` is None, this is an action sampled from the distribution
            over actions defined by the greedy policy. If `a` is not None,
            this is the probability of `a` under the greedy policy.
        """
        a_star = self.parameters["Q"][s, :].argmax()
        if a is None:
            out = self._num2action[a_star]
        else:
            out = 1 if a == a_star else 0
        return out

    def _on_policy_update(self):
        r"""
        Update the `Q` function using an on-policy first-visit Monte Carlo
        update.

        Notes
        -----
        The on-policy first-visit Monte Carlo update is

        .. math::

            Q'(s, a) \leftarrow
                \text{avg}(\text{reward following first visit to } (s, a)
                \text{ across all episodes})

        The on-policy approach is a compromise -- it learns action values not
        for the optimal policy, but for a *near*-optimal policy that still
        explores (the epsilon-soft policy).
        """
        D, P, HS = self.derived_variables, self.parameters, self.episode_history

        ep_rewards = HS["rewards"]
        sa_tuples = set(HS["state_actions"])

        # `index` gives the *first* visit to each (s, a) pair
        locs = [HS["state_actions"].index(sa) for sa in sa_tuples]
        cumulative_returns = [np.sum(ep_rewards[i:]) for i in locs]

        # update Q value with the average of the first-visit return across
        # episodes
        for (s, a), cr in zip(sa_tuples, cumulative_returns):
            D["returns"][(s, a)].append(cr)
            P["Q"][s, a] = np.mean(D["returns"][(s, a)])

    def _off_policy_update(self):
        """
        Update `Q` using weighted importance sampling.

        Notes
        -----
        In importance sampling updates, we account for the fact that we are
        updating a different policy from the one we used to generate behavior
        by weighting the accumulated rewards by the ratio of the probability
        of the trajectory under the target policy versus its probability under
        the behavior policy. In *weighted* importance sampling, the scaled
        rewards are averaged using the importance sampling weights themselves:

            W = importance sampling weight
            G_t = total discounted reward from time t until episode end
            C_n = sum of importance weights for the first n rewards

        This algorithm converges to Q* in the limit.
        """
        P = self.parameters
        HS = self.episode_history
        ep_rewards = HS["rewards"]
        T = len(ep_rewards)

        G, W = 0.0, 1.0
        # iterate backwards from the end of the episode
        for t in reversed(range(T)):
            s, a = HS["state_actions"][t]
            G = self.temporal_discount * G + ep_rewards[t]
            P["C"][s, a] += W

            # update Q(s, a) using weighted importance sampling
            P["Q"][s, a] += (W / P["C"][s, a]) * (G - P["Q"][s, a])

            # multiply the importance sampling ratio by the current weight
            W *= self.target_policy(s, a) / self.behavior_policy(s, a)

            # once W hits 0 no further updates can change Q, so stop early
            if W == 0.0:
                break

    def act(self, obs):
        r"""
        Execute the behavior policy--an :math:`\epsilon`-soft policy used to
        generate actions during training.

        Parameters
        ----------
        obs : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by ``env.step(action)``
            An observation from the environment.

        Returns
        -------
        action : int, float, or :py:class:`ndarray <numpy.ndarray>`
            An action sampled from the distribution over actions defined by
            the epsilon-soft policy.
        """  # noqa: E501
        s = self._obs2num[obs]
        return self.behavior_policy(s)

    def run_episode(self, max_steps, render=False):
        """
        Run the agent on a single episode.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run an episode.
        render : bool
            Whether to render the episode during training.

        Returns
        -------
        reward : float
            The total reward on the episode.
        steps : float
            The number of steps taken on the episode.
        """
        D = self.derived_variables
        total_rwd, n_steps = self._episode(max_steps, render)
        D["episode_num"] += 1
        return total_rwd, n_steps

    def _episode(self, max_steps, render):
        """
        Execute agent on an episode.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run the episode.
        render : bool
            Whether to render the episode during training.

        Returns
        -------
        reward : float
            The total reward on the episode.
        steps : float
            The number of steps taken on the episode.
        """
        obs = self.env.reset()
        HS = self.episode_history
        total_reward, n_steps = 0.0, 0

        for i in range(max_steps):
            if render:
                self.env.render()

            n_steps += 1
            action = self.act(obs)

            s = self._obs2num[obs]
            a = self._action2num[action]

            # store (state, action) tuple
            HS["state_actions"].append((s, a))

            # take action
            obs, reward, done, info = self.env.step(action)

            # record rewards
            HS["rewards"].append(reward)
            total_reward += reward

            if done:
                break

        return total_reward, n_steps

    def update(self):
        """
        Update the parameters of the model following the completion of an
        episode. Flush the episode history after the update is complete.
        """
        H = self.hyperparameters
        if H["off_policy"]:
            self._off_policy_update()
        else:
            self._on_policy_update()
        self.flush_history()

    def greedy_policy(self, max_steps, render=True):
        """
        Execute a greedy policy using the current agent parameters.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run the episode.
        render : bool
            Whether to render the episode during execution.

        Returns
        -------
        total_reward : float
            The total reward on the episode.
        n_steps : float
            The total number of steps taken on the episode.
        """
        H = self.episode_history
        obs = self.env.reset()

        total_reward, n_steps = 0.0, 0
        for i in range(max_steps):
            if render:
                self.env.render()

            n_steps += 1

            # BUG FIX: `_greedy` expects a state *number* (it indexes
            # Q[s, :]), not the raw observation -- convert via `_obs2num`
            # first, as `TemporalDifferenceAgent.greedy_policy` does
            s = self._obs2num[obs]
            action = self._greedy(s)
            a = self._action2num[action]

            # store (state, action) tuple
            H["state_actions"].append((s, a))

            # take action
            obs, reward, done, info = self.env.step(action)

            # record rewards
            H["rewards"].append(reward)
            total_reward += reward

            if done:
                break

        return total_reward, n_steps
def test_monte_carlo_agent():
    """Smoke-test the off-policy Monte Carlo agent on Copy-v0."""
    seed = 12345
    max_steps, n_episodes = 300, 10000

    # policy / smoothing hyperparameters
    epsilon = 0.05
    off_policy = True
    smooth_factor = 0.001
    temporal_discount = 0.95

    env = gym.make("Copy-v0")
    mc_agent = MonteCarloAgent(env, off_policy, temporal_discount, epsilon)

    Trainer(mc_agent, env).train(
        n_episodes,
        max_steps,
        seed=seed,
        plot=True,
        verbose=True,
        render_every=None,
        smooth_factor=smooth_factor,
    )
18,144 | import gym
from numpy_ml.rl_models.trainer import Trainer
from numpy_ml.rl_models.agents import (
CrossEntropyAgent,
MonteCarloAgent,
TemporalDifferenceAgent,
DynaAgent,
)
class Trainer(object):
    """Facilitate agent training and evaluation on a gym environment."""

    def __init__(self, agent, env):
        """
        An object to facilitate agent training and evaluation.

        Parameters
        ----------
        agent : :class:`AgentBase` instance
            The agent to train.
        env : ``gym.wrappers`` or ``gym.envs`` instance
            The environment to run the agent on.
        """
        self.env = env
        self.agent = agent
        # per-episode training statistics, appended to by `train`
        self.rewards = {"total": [], "smooth_total": [], "n_steps": [], "duration": []}

    def _train_episode(self, max_steps, render_every=None):
        """
        Run a single training episode and time it.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run the episode.
        render_every : int or None
            Unused; retained for interface compatibility. Default is None.

        Returns
        -------
        reward : float
            The total reward earned on the episode.
        duration : float
            Wall-clock duration of the episode, in seconds.
        n_steps : int
            The number of steps taken on the episode.
        """
        t0 = time()
        if "train_episode" in dir(self.agent):
            # online training updates over the course of the episode
            reward, n_steps = self.agent.train_episode(max_steps)
        else:
            # offline training updates upon completion of the episode
            reward, n_steps = self.agent.run_episode(max_steps)
            self.agent.update()
        duration = time() - t0
        return reward, duration, n_steps

    def train(
        self,
        n_episodes,
        max_steps,
        seed=None,
        plot=True,
        verbose=True,
        render_every=None,
        smooth_factor=0.05,
    ):
        """
        Train an agent on an OpenAI gym environment, logging training
        statistics along the way.

        Parameters
        ----------
        n_episodes : int
            The number of episodes to train the agent across.
        max_steps : int
            The maximum number of steps the agent can take on each episode.
        seed : int or None
            A seed for the random number generator. Default is None.
        plot : bool
            Whether to generate a plot of the cumulative reward as a function
            of training episode. Default is True.
        verbose : bool
            Whether to print intermediate run statistics to stdout during
            training. Default is True.
        render_every : int or None
            Run (and print the score of) the agent's greedy policy every
            `render_every` episodes. If None, skip these evaluations.
            Default is None.
        smooth_factor : float in [0, 1]
            The amount to smooth the cumulative reward across episodes. Larger
            values correspond to less smoothing.
        """
        # BUG FIX: explicit None check so a seed of 0 is not silently ignored
        if seed is not None:
            np.random.seed(seed)
            self.env.seed(seed=seed)

        t0 = time()
        render_every = np.inf if render_every is None else render_every
        sf = smooth_factor

        for ep in range(n_episodes):
            tot_rwd, duration, n_steps = self._train_episode(max_steps)

            # exponential moving average of the per-episode total reward
            smooth_tot = tot_rwd if ep == 0 else (1 - sf) * smooth_tot + sf * tot_rwd

            if verbose:
                fstr = "[Ep. {:2}] {:<6.2f} Steps | Total Reward: {:<7.2f}"
                fstr += " | Smoothed Total: {:<7.2f} | Duration: {:<6.2f}s"
                print(fstr.format(ep + 1, n_steps, tot_rwd, smooth_tot, duration))

            if (ep + 1) % render_every == 0:
                fstr = "\tGreedy policy total reward: {:.2f}, n_steps: {:.2f}"
                total, n_steps = self.agent.greedy_policy(max_steps)
                print(fstr.format(total, n_steps))

            self.rewards["total"].append(tot_rwd)
            self.rewards["n_steps"].append(n_steps)
            self.rewards["duration"].append(duration)
            self.rewards["smooth_total"].append(smooth_tot)

        train_time = (time() - t0) / 60

        fstr = "Training took {:.2f} mins [{:.2f}s/episode]"
        print(fstr.format(train_time, np.mean(self.rewards["duration"])))

        # final evaluation of the learned greedy policy (no rendering)
        rwd_greedy, n_steps = self.agent.greedy_policy(max_steps, render=False)

        fstr = "Final greedy reward: {:.2f} | n_steps: {:.2f}"
        print(fstr.format(rwd_greedy, n_steps))

        if plot:
            self.plot_rewards(rwd_greedy)

    def plot_rewards(self, rwd_greedy):
        """
        Plot the cumulative reward per episode as a function of episode number.

        Notes
        -----
        Saves plot to the file ``./img/<agent>-<env>.png``

        Parameters
        ----------
        rwd_greedy : float
            The cumulative reward earned with a final execution of a greedy
            target policy.
        """
        try:
            import matplotlib.pyplot as plt
            import seaborn as sns

            # https://seaborn.pydata.org/generated/seaborn.set_context.html
            # https://seaborn.pydata.org/generated/seaborn.set_style.html
            sns.set_style("white")
            sns.set_context("notebook", font_scale=1)
        # BUG FIX: catch only ImportError (a bare `except:` also swallows
        # KeyboardInterrupt / SystemExit) and chain the original cause
        except ImportError as e:
            fstr = "Error importing `matplotlib` and `seaborn` -- plotting functionality is disabled"
            raise ImportError(fstr) from e

        R = self.rewards
        fig, ax = plt.subplots()
        x = np.arange(len(R["total"]))
        y = R["smooth_total"]
        y_raw = R["total"]

        ax.plot(x, y, label="smoothed")
        ax.plot(x, y_raw, alpha=0.5, label="raw")
        ax.axhline(y=rwd_greedy, xmin=min(x), xmax=max(x), ls=":", label="final greedy")
        ax.legend()
        sns.despine()

        env = self.agent.env_info["id"]
        agent = self.agent.hyperparameters["agent"]

        ax.set_xlabel("Episode")
        ax.set_ylabel("Cumulative reward")
        ax.set_title("{} on '{}'".format(agent, env))
        plt.savefig("img/{}-{}.png".format(agent, env))
        plt.close("all")
class TemporalDifferenceAgent(AgentBase):
def __init__(
self,
env,
lr=0.4,
epsilon=0.1,
n_tilings=8,
obs_max=None,
obs_min=None,
grid_dims=[8, 8],
off_policy=False,
temporal_discount=0.99,
):
r"""
A temporal difference learning agent with expected SARSA (on-policy) [3]_ or
TD(0) `Q`-learning (off-policy) [4]_ updates.
Notes
-----
The expected SARSA on-policy TD(0) update is:
.. math::
Q(s, a) \leftarrow Q(s, a) + \eta \left(
r + \gamma \mathbb{E}_\pi[Q(s', a') \mid s'] - Q(s, a)
\right)
and the TD(0) off-policy Q-learning upate is:
.. math::
Q(s, a) \leftarrow Q(s, a) + \eta (
r + \gamma \max_a \left\{ Q(s', a) \right\} - Q(s, a)
)
where in each case we have taken action `a` in state `s`, received
reward `r`, and transitioned into state :math:`s'`. In the above
equations, :math:`\eta` is a learning rate parameter, :math:`\gamma` is
a temporal discount factor, and :math:`\mathbb{E}_\pi[ Q[s', a'] \mid
s']` is the expected value under the current policy :math:`\pi` of the
Q function conditioned that we are in state :math:`s'`.
Observe that the expected SARSA update can be used for both on- and
off-policy methods. In an off-policy context, if the target policy is
greedy and the expectation is taken wrt. the target policy then the
expected SARSA update is exactly Q-learning.
NB. For this implementation the agent requires a discrete action
space, but will try to discretize the observation space via tiling if
it is continuous.
References
----------
.. [3] Rummery, G. & Niranjan, M. (1994). *On-Line Q-learning Using
Connectionist Systems*. Tech Report 166. Cambridge University
Department of Engineering.
.. [4] Watkins, C. (1989). Learning from delayed rewards. *PhD thesis,
Cambridge University*.
Parameters
----------
env : gym.wrappers or gym.envs instance
The environment to run the agent on.
lr : float
Learning rate for the Q function updates. Default is 0.05.
epsilon : float between [0, 1]
The epsilon value in the epsilon-soft policy. Larger values
encourage greater exploration during training. Default is 0.1.
n_tilings : int
The number of overlapping tilings to use if the ``env`` observation
space is continuous. Unused if observation space is discrete.
Default is 8.
obs_max : float or :py:class:`ndarray <numpy.ndarray>`
The value to treat as the max value of the observation space when
calculating the grid widths if the observation space is continuous.
If None, use ``env.observation_space.high``. Unused if observation
space is discrete. Default is None.
obs_min : float or :py:class:`ndarray <numpy.ndarray>`
The value to treat as the min value of the observation space when
calculating grid widths if the observation space is continuous. If
None, use ``env.observation_space.low``. Unused if observation
space is discrete. Default is None.
grid_dims : list
The number of rows and columns in each tiling grid if the env
observation space is continuous. Unused if observation space is
discrete. Default is [8, 8].
off_policy : bool
Whether to use a behavior policy separate from the target policy
during training. If False, use the same epsilon-soft policy for
both behavior and target policies. Default is False.
temporal_discount : float between [0, 1]
The discount factor used for downweighting future rewards. Smaller
values result in greater discounting of future rewards. Default is
0.9.
"""
super().__init__(env)
self.lr = lr
self.obs_max = obs_max
self.obs_min = obs_min
self.epsilon = epsilon
self.n_tilings = n_tilings
self.grid_dims = grid_dims
self.off_policy = off_policy
self.temporal_discount = temporal_discount
self._init_params()
def _init_params(self):
E = self.env_info
assert not E["continuous_actions"], "Action space must be discrete"
obs_encoder = None
if E["continuous_observations"]:
obs_encoder, _ = tile_state_space(
self.env,
self.env_info,
self.n_tilings,
state_action=False,
obs_max=self.obs_max,
obs_min=self.obs_min,
grid_size=self.grid_dims,
)
self._create_2num_dicts(obs_encoder=obs_encoder)
# behavior policy is stochastic, epsilon-soft policy
self.behavior_policy = self.target_policy = self._epsilon_soft_policy
if self.off_policy:
# target policy is deterministic, greedy policy
self.target_policy = self._greedy
# initialize Q function
self.parameters["Q"] = defaultdict(np.random.rand)
# initialize returns object for each state-action pair
self.derived_variables = {"episode_num": 0}
self.hyperparameters = {
"agent": "TemporalDifferenceAgent",
"lr": self.lr,
"obs_max": self.obs_max,
"obs_min": self.obs_min,
"epsilon": self.epsilon,
"n_tilings": self.n_tilings,
"grid_dims": self.grid_dims,
"off_policy": self.off_policy,
"temporal_discount": self.temporal_discount,
}
self.episode_history = {"state_actions": [], "rewards": []}
def run_episode(self, max_steps, render=False):
"""
Run the agent on a single episode without updating the priority queue
or performing backups.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode
render : bool
Whether to render the episode during training
Returns
-------
reward : float
The total reward on the episode, averaged over the theta samples.
steps : float
The total number of steps taken on the episode, averaged over the
theta samples.
"""
return self._episode(max_steps, render, update=False)
def train_episode(self, max_steps, render=False):
"""
Train the agent on a single episode.
Parameters
----------
max_steps : int
The maximum number of steps to run an episode.
render : bool
Whether to render the episode during training.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
D = self.derived_variables
total_rwd, n_steps = self._episode(max_steps, render, update=True)
D["episode_num"] += 1
return total_rwd, n_steps
def _episode(self, max_steps, render, update=True):
"""
Run or train the agent on an episode.
Parameters
----------
max_steps : int
The maximum number of steps to run the episode.
render : bool
Whether to render the episode during training.
update : bool
Whether to perform the Q function backups after each step. Default
is True.
Returns
-------
reward : float
The total reward on the episode.
steps : float
The number of steps taken on the episode.
"""
self.flush_history()
obs = self.env.reset()
HS = self.episode_history
action = self.act(obs)
s = self._obs2num[obs]
a = self._action2num[action]
# store initial (state, action) tuple
HS["state_actions"].append((s, a))
total_reward, n_steps = 0.0, 0
for i in range(max_steps):
if render:
self.env.render()
# take action
obs, reward, done, info = self.env.step(action)
n_steps += 1
# record rewards
HS["rewards"].append(reward)
total_reward += reward
# generate next state and action
action = self.act(obs)
s_ = self._obs2num[obs] if not done else None
a_ = self._action2num[action]
# store next (state, action) tuple
HS["state_actions"].append((s_, a_))
if update:
self.update()
if done:
break
return total_reward, n_steps
def _epsilon_soft_policy(self, s, a=None):
r"""
Epsilon-soft exploration policy.
In epsilon-soft policies, :math:`\pi(a|s) > 0` for all s ∈ S and all a
∈ A(s) at the start of training. As learning progresses, :math:`\pi`
gradually shifts closer and closer to a deterministic optimal policy.
In particular, we have:
pi(a|s) = 1 - epsilon + (epsilon / |A(s)|) IFF a == a*
pi(a|s) = epsilon / |A(s)| IFF a != a*
where
|A(s)| is the number of actions available in state s
a* ∈ A(s) is the greedy action in state s (i.e., a* = argmax_a Q(s, a))
Note that epsilon-greedy policies are instances of epsilon-soft
policies, defined as policies for which pi(a|s) >= epsilon / |A(s)| for
all states and actions.
Parameters
----------
s : int, float, or tuple
The state number for the current observation, as returned by
``self._obs2num[obs]``
a : int, float, or tuple
The action number in the current state, as returned by
self._action2num[obs]. If None, sample an action from the action
probabilities in state s, otherwise, return the probability of
action `a` under the epsilon-soft policy. Default is None.
Returns
-------
If `a` is None:
action : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by `self._num2action`
If `a` is None, returns an action sampled from the distribution
over actions defined by the epsilon-soft policy.
If `a` is not None:
action_prob : float in range [0, 1]
If `a` is not None, returns the probability of `a` under the
epsilon-soft policy.
""" # noqa: E501
E, P = self.env_info, self.parameters
# TODO: this assumes all actions are available in every state
n_actions = np.prod(E["n_actions_per_dim"])
a_star = np.argmax([P["Q"][(s, aa)] for aa in range(n_actions)])
p_a_star = 1.0 - self.epsilon + (self.epsilon / n_actions)
p_a = self.epsilon / n_actions
action_probs = np.ones(n_actions) * p_a
action_probs[a_star] = p_a_star
np.testing.assert_allclose(np.sum(action_probs), 1)
if a is not None:
return action_probs[a]
# sample action
a = np.random.multinomial(1, action_probs).argmax()
return self._num2action[a]
def _greedy(self, s, a=None):
"""
A greedy behavior policy. Only used when off-policy is true.
Parameters
----------
s : int, float, or tuple
The state number for the current observation, as returned by
``self._obs2num[obs]``
a : int, float, or tuple
The action number in the current state, as returned by
``self._action2num[obs]``. If None, sample an action from the
action probabilities in state `s`, otherwise, return the
probability of action `a` under the greedy policy. Default is None.
Returns
-------
If `a` is None:
action : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by ``self._num2action``
If `a` is None, returns an action sampled from the distribution
over actions defined by the greedy policy.
If `a` is not None:
action_prob : float in range [0, 1]
If `a` is not None, returns the probability of `a` under the
greedy policy.
""" # noqa: E501
P, E = self.parameters, self.env_info
n_actions = np.prod(E["n_actions_per_dim"])
a_star = np.argmax([P["Q"][(s, aa)] for aa in range(n_actions)])
if a is None:
out = self._num2action[a_star]
else:
out = 1 if a == a_star else 0
return out
def _on_policy_update(self, s, a, r, s_, a_):
"""
Update the Q function using the expected SARSA on-policy TD(0) update:
Q[s, a] <- Q[s, a] + lr * [
r + temporal_discount * E[Q[s', a'] | s'] - Q[s, a]
]
where
E[ Q[s', a'] | s'] is the expected value of the Q function over all
a_ given that we're in state s' under the current policy
NB. the expected SARSA update can be used for both on- and off-policy
methods. In an off-policy context, if the target policy is greedy and
the expectation is taken wrt. the target policy then the expected SARSA
update is exactly Q-learning.
Parameters
----------
s : int as returned by `self._obs2num`
The id for the state/observation at timestep t-1
a : int as returned by `self._action2num`
The id for the action taken at timestep t-1
r : float
The reward after taking action `a` in state `s` at timestep t-1
s_ : int as returned by `self._obs2num`
The id for the state/observation at timestep t
a_ : int as returned by `self._action2num`
The id for the action taken at timestep t
"""
Q, E, pi = self.parameters["Q"], self.env_info, self.behavior_policy
# TODO: this assumes that all actions are available in each state
n_actions = np.prod(E["n_actions_per_dim"])
# compute the expected value of Q(s', a') given that we are in state s'
E_Q = np.sum([pi(s_, aa) * Q[(s_, aa)] for aa in range(n_actions)]) if s_ else 0
# perform the expected SARSA TD(0) update
qsa = Q[(s, a)]
Q[(s, a)] = qsa + self.lr * (r + self.temporal_discount * E_Q - qsa)
def _off_policy_update(self, s, a, r, s_):
"""
Update the `Q` function using the TD(0) Q-learning update:
Q[s, a] <- Q[s, a] + lr * (
r + temporal_discount * max_a { Q[s', a] } - Q[s, a]
)
Parameters
----------
s : int as returned by `self._obs2num`
The id for the state/observation at timestep `t-1`
a : int as returned by `self._action2num`
The id for the action taken at timestep `t-1`
r : float
The reward after taking action `a` in state `s` at timestep `t-1`
s_ : int as returned by `self._obs2num`
The id for the state/observation at timestep `t`
"""
Q, E = self.parameters["Q"], self.env_info
n_actions = np.prod(E["n_actions_per_dim"])
qsa = Q[(s, a)]
Qs_ = [Q[(s_, aa)] for aa in range(n_actions)] if s_ else [0]
Q[(s, a)] = qsa + self.lr * (r + self.temporal_discount * np.max(Qs_) - qsa)
def update(self):
"""Update the parameters of the model online after each new state-action."""
H, HS = self.hyperparameters, self.episode_history
(s, a), r = HS["state_actions"][-2], HS["rewards"][-1]
s_, a_ = HS["state_actions"][-1]
if H["off_policy"]:
self._off_policy_update(s, a, r, s_)
else:
self._on_policy_update(s, a, r, s_, a_)
def act(self, obs):
r"""
Execute the behavior policy--an :math:`\epsilon`-soft policy used to
generate actions during training.
Parameters
----------
obs : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by ``env.step(action)``
An observation from the environment.
Returns
-------
action : int, float, or :py:class:`ndarray <numpy.ndarray>`
An action sampled from the distribution over actions defined by the
epsilon-soft policy.
""" # noqa: E501
s = self._obs2num[obs]
return self.behavior_policy(s)
def greedy_policy(self, max_steps, render=True):
    """
    Run a single episode under a deterministic greedy policy using the
    current agent parameters.

    Parameters
    ----------
    max_steps : int
        The maximum number of steps to run the episode.
    render : bool
        Whether to render the episode during execution.

    Returns
    -------
    total_reward : float
        The total reward on the episode.
    n_steps : float
        The total number of steps taken on the episode.
    """
    self.flush_history()
    history = self.episode_history
    obs = self.env.reset()

    total_reward = 0.0
    n_steps = 0
    for _ in range(max_steps):
        if render:
            self.env.render()

        state_id = self._obs2num[obs]
        action = self._greedy(state_id)
        action_id = self._action2num[action]

        # log the (state, action) pair before stepping the environment
        history["state_actions"].append((state_id, action_id))

        obs, reward, done, _info = self.env.step(action)
        n_steps += 1

        history["rewards"].append(reward)
        total_reward += reward
        if done:
            break

    return total_reward, n_steps
def test_temporal_difference_agent():
    """Smoke-test TemporalDifferenceAgent training on LunarLander-v2."""
    seed = 12345
    max_steps = 200
    n_episodes = 5000
    smooth_factor = 0.005

    env = gym.make("LunarLander-v2")
    agent = TemporalDifferenceAgent(
        env,
        lr=0.4,
        obs_max=1,
        obs_min=-1,
        epsilon=0.10,
        n_tilings=10,
        grid_dims=[100, 100],
        off_policy=True,
        temporal_discount=0.999,
    )
    Trainer(agent, env).train(
        n_episodes,
        max_steps,
        seed=seed,
        plot=True,
        verbose=True,
        render_every=None,
        smooth_factor=smooth_factor,
    )
18,145 | import gym
from numpy_ml.rl_models.trainer import Trainer
from numpy_ml.rl_models.agents import (
CrossEntropyAgent,
MonteCarloAgent,
TemporalDifferenceAgent,
DynaAgent,
)
class Trainer(object):
    def __init__(self, agent, env):
        """
        An object to facilitate agent training and evaluation.

        Parameters
        ----------
        agent : :class:`AgentBase` instance
            The agent to train.
        env : ``gym.wrappers`` or ``gym.envs`` instance
            The environment to run the agent on.
        """
        self.env = env
        self.agent = agent
        # per-episode training statistics, appended to on every episode
        self.rewards = {"total": [], "smooth_total": [], "n_steps": [], "duration": []}

    def _train_episode(self, max_steps, render_every=None):
        """
        Run (and, if the agent supports it, train online during) one episode.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps the agent can take on the episode.
        render_every : int or None
            Unused; retained for interface compatibility. Default is None.

        Returns
        -------
        reward : float
            The total reward earned on the episode.
        duration : float
            Wall-clock duration of the episode, in seconds.
        n_steps : int
            The number of steps taken on the episode.
        """
        t0 = time()
        if "train_episode" in dir(self.agent):
            # online training updates over the course of the episode
            reward, n_steps = self.agent.train_episode(max_steps)
        else:
            # offline training updates upon completion of the episode
            reward, n_steps = self.agent.run_episode(max_steps)
            self.agent.update()
        duration = time() - t0
        return reward, duration, n_steps

    def train(
        self,
        n_episodes,
        max_steps,
        seed=None,
        plot=True,
        verbose=True,
        render_every=None,
        smooth_factor=0.05,
    ):
        """
        Train an agent on an OpenAI gym environment, logging training
        statistics along the way.

        Parameters
        ----------
        n_episodes : int
            The number of episodes to train the agent across.
        max_steps : int
            The maximum number of steps the agent can take on each episode.
        seed : int or None
            A seed for the random number generator. Default is None.
        plot : bool
            Whether to generate a plot of the cumulative reward as a function
            of training episode. Default is True.
        verbose : bool
            Whether to print intermediate run statistics to stdout during
            training. Default is True.
        render_every : int or None
            Render a greedy-policy rollout after every `render_every`
            episodes. If None, never render during training. Default is None.
        smooth_factor : float in [0, 1]
            The amount to smooth the cumulative reward across episodes. Larger
            values correspond to less smoothing.
        """
        # `if seed:` would silently skip seeding when seed == 0
        if seed is not None:
            np.random.seed(seed)
            self.env.seed(seed=seed)

        t0 = time()
        render_every = np.inf if render_every is None else render_every
        sf = smooth_factor

        smooth_tot = 0.0
        for ep in range(n_episodes):
            tot_rwd, duration, n_steps = self._train_episode(max_steps)
            # exponential moving average of the per-episode reward
            smooth_tot = tot_rwd if ep == 0 else (1 - sf) * smooth_tot + sf * tot_rwd

            if verbose:
                fstr = "[Ep. {:2}] {:<6.2f} Steps | Total Reward: {:<7.2f}"
                fstr += " | Smoothed Total: {:<7.2f} | Duration: {:<6.2f}s"
                print(fstr.format(ep + 1, n_steps, tot_rwd, smooth_tot, duration))

            if (ep + 1) % render_every == 0:
                fstr = "\tGreedy policy total reward: {:.2f}, n_steps: {:.2f}"
                total, n_steps = self.agent.greedy_policy(max_steps)
                print(fstr.format(total, n_steps))

            self.rewards["total"].append(tot_rwd)
            self.rewards["n_steps"].append(n_steps)
            self.rewards["duration"].append(duration)
            self.rewards["smooth_total"].append(smooth_tot)

        train_time = (time() - t0) / 60
        fstr = "Training took {:.2f} mins [{:.2f}s/episode]"
        print(fstr.format(train_time, np.mean(self.rewards["duration"])))

        rwd_greedy, n_steps = self.agent.greedy_policy(max_steps, render=False)
        fstr = "Final greedy reward: {:.2f} | n_steps: {:.2f}"
        print(fstr.format(rwd_greedy, n_steps))

        if plot:
            self.plot_rewards(rwd_greedy)

    def plot_rewards(self, rwd_greedy):
        """
        Plot the cumulative reward per episode as a function of episode number.

        Notes
        -----
        Saves plot to the file ``./img/<agent>-<env>.png``

        Parameters
        ----------
        rwd_greedy : float
            The cumulative reward earned with a final execution of a greedy
            target policy.
        """
        try:
            import matplotlib.pyplot as plt
            import seaborn as sns

            # https://seaborn.pydata.org/generated/seaborn.set_context.html
            # https://seaborn.pydata.org/generated/seaborn.set_style.html
            sns.set_style("white")
            sns.set_context("notebook", font_scale=1)
        except ImportError as e:
            # previously a bare `except:`, which also masked unrelated errors
            fstr = "Error importing `matplotlib` and `seaborn` -- plotting functionality is disabled"
            raise ImportError(fstr) from e

        R = self.rewards
        fig, ax = plt.subplots()
        x = np.arange(len(R["total"]))
        y = R["smooth_total"]
        y_raw = R["total"]

        ax.plot(x, y, label="smoothed")
        ax.plot(x, y_raw, alpha=0.5, label="raw")
        ax.axhline(y=rwd_greedy, xmin=min(x), xmax=max(x), ls=":", label="final greedy")
        ax.legend()
        sns.despine()

        env = self.agent.env_info["id"]
        agent = self.agent.hyperparameters["agent"]

        ax.set_xlabel("Episode")
        ax.set_ylabel("Cumulative reward")
        ax.set_title("{} on '{}'".format(agent, env))
        plt.savefig("img/{}-{}.png".format(agent, env))
        plt.close("all")
class DynaAgent(AgentBase):
    """Dyna-Q / Dyna-Q+ agent with prioritized-sweeping planning updates."""

    def __init__(
        self,
        env,
        lr=0.4,
        epsilon=0.1,
        n_tilings=8,
        obs_max=None,
        obs_min=None,
        q_plus=False,
        grid_dims=[8, 8],  # NOTE(review): mutable default; safe only if never mutated
        explore_weight=0.05,
        temporal_discount=0.9,
        n_simulated_actions=50,
    ):
        r"""
        A Dyna-`Q` / Dyna-`Q+` agent [5]_ with full TD(0) `Q`-learning updates
        via prioritized-sweeping [6]_ .

        Notes
        -----
        This approach consists of three components: a planning method involving
        simulated actions, a direct RL method where the agent directly interacts
        with the environment, and a model-learning method where the agent
        learns to better represent the environment during planning.

        During planning, the agent performs random-sample one-step tabular
        Q-planning with prioritized sweeping. This entails using a priority
        queue to retrieve the state-action pairs from the agent's history which
        would stand to have the largest change to their Q-values if backed up.
        Specifically, for state action pair `(s, a)` the priority value is:

        .. math::

            P = \sum_{s'} p(s') | r + \gamma \max_a \{Q(s', a) \} - Q(s, a) |

        which corresponds to the absolute magnitude of the TD(0) Q-learning
        backup for the pair.

        When the first pair in the queue is backed up, the effect on each of
        its predecessor pairs is computed. If the predecessor's priority is
        greater than a small threshold the pair is added to the queue and the
        process is repeated until either the queue is empty or we have exceeded
        `n_simulated_actions` updates. These backups occur without the agent
        taking any action in the environment and thus constitute simulations
        based on the agent's current model of the environment (i.e., its
        tabular state-action history).

        During the direct RL phase, the agent takes an action based on its
        current behavior policy and Q function and receives a reward from the
        environment. The agent logs this state-action-reward-new state tuple in
        its interaction table (i.e., environment model) and updates its Q
        function using a full-backup version of the Q-learning update:

        .. math::

            Q(s, a) \leftarrow Q(s, a) + \eta \sum_{r, s'} p(r, s' \mid s, a)
                \left(r + \gamma \max_a \{ Q(s', a) \} - Q(s, a) \right)

        References
        ----------
        .. [5] Sutton, R. (1990). Integrated architectures for learning,
           planning, and reacting based on approximating dynamic programming.
           In *Proceedings of the 7th Annual ICML*, 216-224.
        .. [6] Moore, A. & Atkeson, C. (1993). Prioritized sweeping:
           Reinforcement learning with less data and less time. *Machine
           Learning, 13(1)*, 103-130.

        Parameters
        ----------
        env : :class:`gym.wrappers` or :class:`gym.envs` instance
            The environment to run the agent on
        lr : float
            Learning rate for the `Q` function updates. Default is 0.05.
        epsilon : float between [0, 1]
            The epsilon value in the epsilon-soft policy. Larger values
            encourage greater exploration during training. Default is 0.1.
        n_tilings : int
            The number of overlapping tilings to use if the env observation
            space is continuous. Unused if observation space is discrete.
            Default is 8.
        obs_max : float or :py:class:`ndarray <numpy.ndarray>` or None
            The value to treat as the max value of the observation space when
            calculating the grid widths if the observation space is continuous.
            If None, use :meth:`env.observation_space.high`. Unused if observation
            space is discrete. Default is None.
        obs_min : float or :py:class:`ndarray <numpy.ndarray>` or None
            The value to treat as the min value of the observation space when
            calculating grid widths if the observation space is continuous. If
            None, use :meth:`env.observation_space.low`. Unused if observation
            space is discrete. Default is None.
        grid_dims : list
            The number of rows and columns in each tiling grid if the env
            observation space is continuous. Unused if observation space is
            discrete. Default is `[8, 8]`.
        q_plus : bool
            Whether to add incentives for visiting states that the agent hasn't
            encountered recently. Default is False.
        explore_weight : float
            Amount to incentivize exploring states that the agent hasn't
            recently visited. Only used if `q_plus` is True. Default is 0.05.
        temporal_discount : float between [0, 1]
            The discount factor used for downweighting future rewards. Smaller
            values result in greater discounting of future rewards. Default is
            0.9.
        n_simulated_actions : int
            THe number of simulated actions to perform for each "real" action.
            Default is 50.
        """
        super().__init__(env)

        self.lr = lr
        self.q_plus = q_plus
        self.obs_max = obs_max
        self.obs_min = obs_min
        self.epsilon = epsilon
        self.n_tilings = n_tilings
        self.grid_dims = grid_dims
        self.explore_weight = explore_weight
        self.temporal_discount = temporal_discount
        self.n_simulated_actions = n_simulated_actions

        self._init_params()

    def _init_params(self):
        # Internal setup: observation encoder (for continuous spaces), Q
        # table, environment model, and planning/bookkeeping state.
        E = self.env_info
        assert not E["continuous_actions"], "Action space must be discrete"

        obs_encoder = None
        if E["continuous_observations"]:
            # discretize a continuous observation space via overlapping tilings
            obs_encoder, _ = tile_state_space(
                self.env,
                self.env_info,
                self.n_tilings,
                state_action=False,
                obs_max=self.obs_max,
                obs_min=self.obs_min,
                grid_size=self.grid_dims,
            )

        self._create_2num_dicts(obs_encoder=obs_encoder)
        # Dyna-Q uses the same epsilon-soft policy as behavior and target
        self.behavior_policy = self.target_policy = self._epsilon_soft_policy

        # initialize Q function and model; unseen (s, a) entries receive a
        # random initial value via the defaultdict factory
        self.parameters["Q"] = defaultdict(np.random.rand)
        self.parameters["model"] = EnvModel()

        # initialize returns object for each state-action pair
        self.derived_variables = {
            "episode_num": 0,
            "sweep_queue": {},
            "visited": set(),
            "steps_since_last_visit": defaultdict(lambda: 0),
        }

        if self.q_plus:
            # NOTE(review): this re-initializes the visit counters with a
            # random value per key rather than 0 -- presumably to break ties
            # in the Dyna-Q+ exploration bonus; confirm this is intentional.
            self.derived_variables["steps_since_last_visit"] = defaultdict(
                np.random.rand,
            )

        self.hyperparameters = {
            "agent": "DynaAgent",
            "lr": self.lr,
            "q_plus": self.q_plus,
            "obs_max": self.obs_max,
            "obs_min": self.obs_min,
            "epsilon": self.epsilon,
            "n_tilings": self.n_tilings,
            "grid_dims": self.grid_dims,
            "explore_weight": self.explore_weight,
            "temporal_discount": self.temporal_discount,
            "n_simulated_actions": self.n_simulated_actions,
        }

        self.episode_history = {"state_actions": [], "rewards": []}

    def act(self, obs):
        r"""
        Execute the behavior policy--an :math:`\epsilon`-soft policy used to
        generate actions during training.

        Parameters
        ----------
        obs : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by ``env.step(action)``
            An observation from the environment.

        Returns
        -------
        action : int, float, or :py:class:`ndarray <numpy.ndarray>`
            An action sampled from the distribution over actions defined by the
            epsilon-soft policy.
        """  # noqa: E501
        s = self._obs2num[obs]
        return self.behavior_policy(s)

    def _epsilon_soft_policy(self, s, a=None):
        """
        Epsilon-soft exploration policy.

        In epsilon-soft policies, pi(a|s) > 0 for all s ∈ S and all a ∈ A(s) at
        the start of training. As learning progresses, pi gradually shifts
        closer and closer to a deterministic optimal policy.

        In particular, we have:

            pi(a|s) = 1 - epsilon + (epsilon / |A(s)|) IFF a == a*
            pi(a|s) = epsilon / |A(s)| IFF a != a*

        where

            |A(s)| is the number of actions available in state s
            a* ∈ A(s) is the greedy action in state s (i.e., a* = argmax_a Q(s, a))

        Note that epsilon-greedy policies are instances of epsilon-soft
        policies, defined as policies for which pi(a|s) >= epsilon / |A(s)| for
        all states and actions.

        Parameters
        ----------
        s : int, float, or tuple
            The state number for the current observation, as returned by
            self._obs2num[obs]
        a : int, float, or tuple
            The action number in the current state, as returned by
            self._action2num[obs]. If None, sample an action from the action
            probabilities in state s, otherwise, return the probability of
            action `a` under the epsilon-soft policy. Default is None.

        Returns
        -------
        If `a` is None:
        action : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by :meth:`_num2action`
            If `a` is None, returns an action sampled from the distribution
            over actions defined by the epsilon-soft policy.

        If `a` is not None:
        action_prob : float in range [0, 1]
            If `a` is not None, returns the probability of `a` under the
            epsilon-soft policy.
        """  # noqa: E501
        E, P = self.env_info, self.parameters

        # TODO: this assumes all actions are available in every state
        n_actions = np.prod(E["n_actions_per_dim"])

        a_star = np.argmax([P["Q"][(s, aa)] for aa in range(n_actions)])
        p_a_star = 1.0 - self.epsilon + (self.epsilon / n_actions)
        p_a = self.epsilon / n_actions

        action_probs = np.ones(n_actions) * p_a
        action_probs[a_star] = p_a_star
        # sanity check: the action probabilities form a valid distribution
        np.testing.assert_allclose(np.sum(action_probs), 1)

        if a is not None:
            return action_probs[a]

        # sample action
        a = np.random.multinomial(1, action_probs).argmax()
        return self._num2action[a]

    def _greedy(self, s, a=None):
        """
        A greedy behavior policy.

        Parameters
        ----------
        s : int, float, or tuple
            The state number for the current observation, as returned by
            self._obs2num[obs]
        a : int, float, or tuple
            The action number in the current state, as returned by
            self._action2num[obs]. If None, sample an action from the action
            probabilities in state s, otherwise, return the probability of
            action `a` under the greedy policy. Default is None.

        Returns
        -------
        If `a` is None:
        action : int, float, or :py:class:`ndarray <numpy.ndarray>` as returned by :meth:`_num2action`
            If `a` is None, returns an action sampled from the distribution
            over actions defined by the greedy policy.

        If `a` is not None:
        action_prob : float in range [0, 1]
            If `a` is not None, returns the probability of `a` under the
            greedy policy.
        """  # noqa: E501
        E, Q = self.env_info, self.parameters["Q"]
        n_actions = np.prod(E["n_actions_per_dim"])
        a_star = np.argmax([Q[(s, aa)] for aa in range(n_actions)])
        if a is None:
            out = self._num2action[a_star]
        else:
            # deterministic policy: probability 1 for the greedy action
            out = 1 if a == a_star else 0
        return out

    def update(self):
        """
        Update the priority queue with the most recent (state, action) pair and
        perform random-sample one-step tabular Q-planning.

        Notes
        -----
        The planning algorithm uses a priority queue to retrieve the
        state-action pairs from the agent's history which will result in the
        largest change to its `Q`-value if backed up. When the first pair in
        the queue is backed up, the effect on each of its predecessor pairs is
        computed. If the predecessor's priority is greater than a small
        threshold the pair is added to the queue and the process is repeated
        until either the queue is empty or we exceed `n_simulated_actions`
        updates.
        """
        s, a = self.episode_history["state_actions"][-1]
        self._update_queue(s, a)
        self._simulate_behavior()

    def _update_queue(self, s, a):
        """
        Update the priority queue by calculating the priority for (s, a) and
        inserting it into the queue if it exceeds a fixed (small) threshold.

        Parameters
        ----------
        s : int as returned by `self._obs2num`
            The id for the state/observation
        a : int as returned by `self._action2num`
            The id for the action taken from state `s`
        """
        sweep_queue = self.derived_variables["sweep_queue"]

        # TODO: what's a good threshold here?
        priority = self._calc_priority(s, a)
        if priority >= 0.001:
            if (s, a) in sweep_queue:
                # keep the highest priority seen for this pair
                sweep_queue[(s, a)] = max(priority, sweep_queue[(s, a)])
            else:
                sweep_queue[(s, a)] = priority

    def _calc_priority(self, s, a):
        """
        Compute the "priority" for state-action pair (s, a). The priority P is
        defined as:

            P = sum_{s_} p(s_) * abs(r + temporal_discount * max_a {Q[s_, a]} - Q[s, a])

        which corresponds to the absolute magnitude of the TD(0) Q-learning
        backup for (s, a).

        Parameters
        ----------
        s : int as returned by `self._obs2num`
            The id for the state/observation
        a : int as returned by `self._action2num`
            The id for the action taken from state `s`

        Returns
        -------
        priority : float
            The absolute magnitude of the full-backup TD(0) Q-learning update
            for (s, a)
        """
        priority = 0.0
        E = self.env_info
        Q = self.parameters["Q"]
        env_model = self.parameters["model"]
        n_actions = np.prod(E["n_actions_per_dim"])

        # expectation over the (reward, next-state) outcomes recorded in the
        # learned environment model
        outcome_probs = env_model.outcome_probs(s, a)
        for (r, s_), p_rs_ in outcome_probs:
            max_q = np.max([Q[(s_, aa)] for aa in range(n_actions)])
            P = p_rs_ * (r + self.temporal_discount * max_q - Q[(s, a)])
            priority += np.abs(P)
        return priority

    def _simulate_behavior(self):
        """
        Perform random-sample one-step tabular Q-planning with prioritized
        sweeping.

        Notes
        -----
        This approach uses a priority queue to retrieve the state-action pairs
        from the agent's history with largest change to their Q-values if
        backed up. When the first pair in the queue is backed up, the effect on
        each of its predecessor pairs is computed. If the predecessor's
        priority is greater than a small threshold the pair is added to the
        queue and the process is repeated until either the queue is empty or we
        have exceeded a `n_simulated_actions` updates.
        """
        env_model = self.parameters["model"]
        sweep_queue = self.derived_variables["sweep_queue"]
        for _ in range(self.n_simulated_actions):
            if len(sweep_queue) == 0:
                break

            # select (s, a) pair with the largest update (priority)
            sq_items = list(sweep_queue.items())
            (s_sim, a_sim), _ = sorted(sq_items, key=lambda x: x[1], reverse=True)[0]

            # remove entry from queue
            del sweep_queue[(s_sim, a_sim)]

            # update Q function for (s_sim, a_sim) using the full-backup
            # version of the TD(0) Q-learning update
            self._update(s_sim, a_sim)

            # get all (_s, _a) pairs that lead to s_sim (ie., s_sim's predecessors)
            pairs = env_model.state_action_pairs_leading_to_outcome(s_sim)

            # add predecessors to queue if their priority exceeds thresh
            for (_s, _a) in pairs:
                self._update_queue(_s, _a)

    def _update(self, s, a):
        """
        Update Q using a full-backup version of the TD(0) Q-learning update:

            Q(s, a) = Q(s, a) + lr *
                sum_{r, s'} [
                    p(r, s' | s, a) * (r + gamma * max_a { Q(s', a) } - Q(s, a))
                ]

        Parameters
        ----------
        s : int as returned by ``self._obs2num``
            The id for the state/observation
        a : int as returned by ``self._action2num``
            The id for the action taken from state `s`
        """
        update = 0.0
        env_model = self.parameters["model"]
        E, D, Q = self.env_info, self.derived_variables, self.parameters["Q"]
        n_actions = np.prod(E["n_actions_per_dim"])

        # sample rewards from the model
        outcome_probs = env_model.outcome_probs(s, a)
        for (r, s_), p_rs_ in outcome_probs:
            # encourage visiting long-untried actions by adding a "bonus"
            # reward proportional to the sqrt of the time since last visit
            if self.q_plus:
                r += self.explore_weight * np.sqrt(D["steps_since_last_visit"][(s, a)])
            max_q = np.max([Q[(s_, a_)] for a_ in range(n_actions)])
            update += p_rs_ * (r + self.temporal_discount * max_q - Q[(s, a)])

        # update Q value for (s, a) pair
        Q[(s, a)] += self.lr * update

    def run_episode(self, max_steps, render=False):
        """
        Run the agent on a single episode without performing `Q`-function
        backups.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run an episode.
        render : bool
            Whether to render the episode during training.

        Returns
        -------
        reward : float
            The total reward on the episode.
        steps : float
            The number of steps taken on the episode.
        """
        return self._episode(max_steps, render, update=False)

    def train_episode(self, max_steps, render=False):
        """
        Train the agent on a single episode.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run an episode.
        render : bool
            Whether to render the episode during training.

        Returns
        -------
        reward : float
            The total reward on the episode.
        steps : float
            The number of steps taken on the episode.
        """
        D = self.derived_variables
        total_rwd, n_steps = self._episode(max_steps, render, update=True)
        D["episode_num"] += 1
        return total_rwd, n_steps

    def _episode(self, max_steps, render, update=True):
        """
        Run or train the agent on an episode.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run the episode.
        render : bool
            Whether to render the episode during training.
        update : bool
            Whether to perform the `Q` function backups after each step.
            Default is True.

        Returns
        -------
        reward : float
            The total reward on the episode.
        steps : float
            The number of steps taken on the episode.
        """
        self.flush_history()

        obs = self.env.reset()
        env_model = self.parameters["model"]
        HS, D = self.episode_history, self.derived_variables

        action = self.act(obs)
        s = self._obs2num[obs]
        a = self._action2num[action]

        # store initial (state, action) tuple
        HS["state_actions"].append((s, a))

        total_reward, n_steps = 0.0, 0
        for i in range(max_steps):
            if render:
                self.env.render()

            # take action
            obs, reward, done, info = self.env.step(action)
            n_steps += 1

            # record rewards
            HS["rewards"].append(reward)
            total_reward += reward

            # generate next state and action; terminal states are encoded
            # as None
            action = self.act(obs)
            s_ = self._obs2num[obs] if not done else None
            a_ = self._action2num[action]

            # update model with the observed (s, a, r, s') transition count
            env_model[(s, a, reward, s_)] += 1

            # update history counter
            for k in D["steps_since_last_visit"].keys():
                D["steps_since_last_visit"][k] += 1
            D["steps_since_last_visit"][(s, a)] = 0

            if update:
                self.update()

            # store next (state, action) tuple
            HS["state_actions"].append((s_, a_))
            s, a = s_, a_

            if done:
                break

        return total_reward, n_steps

    def greedy_policy(self, max_steps, render=True):
        """
        Execute a deterministic greedy policy using the current agent
        parameters.

        Parameters
        ----------
        max_steps : int
            The maximum number of steps to run the episode.
        render : bool
            Whether to render the episode during execution.

        Returns
        -------
        total_reward : float
            The total reward on the episode.
        n_steps : float
            The total number of steps taken on the episode.
        """
        self.flush_history()

        H = self.episode_history
        obs = self.env.reset()

        total_reward, n_steps = 0.0, 0
        for i in range(max_steps):
            if render:
                self.env.render()

            s = self._obs2num[obs]
            action = self._greedy(s)
            a = self._action2num[action]

            # store (state, action) tuple
            H["state_actions"].append((s, a))

            # take action
            obs, reward, done, info = self.env.step(action)
            n_steps += 1

            # record rewards
            H["rewards"].append(reward)
            total_reward += reward

            if done:
                break

        return total_reward, n_steps
def test_dyna_agent():
    """Smoke-test DynaAgent training on Taxi-v2."""
    seed = 12345
    max_steps = 200
    n_episodes = 150
    smooth_factor = 0.01

    env = gym.make("Taxi-v2")
    agent = DynaAgent(
        env,
        lr=0.4,
        q_plus=False,
        obs_max=1,
        obs_min=-1,
        epsilon=0.10,
        n_tilings=10,
        grid_dims=[10, 10],
        explore_weight=0.05,
        temporal_discount=0.99,
        n_simulated_actions=25,
    )
    Trainer(agent, env).train(
        n_episodes,
        max_steps,
        seed=seed,
        plot=True,
        verbose=True,
        render_every=None,
        smooth_factor=smooth_factor,
    )
18,146 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
sns.set_context("notebook", font_scale=0.7)
from numpy_ml.neural_nets.activations import (
Affine,
ReLU,
LeakyReLU,
Tanh,
Sigmoid,
ELU,
Exponential,
SELU,
HardSigmoid,
SoftPlus,
)
def plot_activations():
    """Plot each activation fn and its first two derivatives on [-3, 3]."""
    fns = [
        Affine(),
        Tanh(),
        Sigmoid(),
        ReLU(),
        LeakyReLU(),
        ELU(),
        Exponential(),
        SELU(),
        HardSigmoid(),
        SoftPlus(),
    ]

    fig, axes = plt.subplots(2, 5, sharex=True, sharey=True)
    # the input grid is the same for every panel, so build it once
    X = np.linspace(-3, 3, 100).astype(float).reshape(100, 1)

    for ax, fn in zip(axes.flatten(), fns):
        ax.plot(X, fn(X), label=r"$y$", alpha=1.0)
        ax.plot(X, fn.grad(X), label=r"$\frac{dy}{dx}$", alpha=1.0)
        ax.plot(X, fn.grad2(X), label=r"$\frac{d^2 y}{dx^2}$", alpha=1.0)
        ax.hlines(0, -3, 3, lw=1, linestyles="dashed", color="k")
        ax.vlines(0, -1.2, 1.2, lw=1, linestyles="dashed", color="k")
        ax.set_ylim(-1.1, 1.1)
        ax.set_xlim(-3, 3)
        ax.set_xticks([])
        ax.set_yticks([-1, 0, 1])
        ax.xaxis.set_visible(False)
        ax.set_title("{}".format(fn))
        ax.legend(frameon=False)
        sns.despine(left=True, bottom=True)

    fig.set_size_inches(10, 5)
    plt.tight_layout()
    plt.savefig("img/plot.png", dpi=300)
    plt.close("all")
18,147 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.nonparametric import GPRegression, KNN, KernelRegression
from numpy_ml.linear_models.lm import LinearRegression
from sklearn.model_selection import train_test_split
def random_regression_problem(n_ex, n_in, n_out, d=3, intercept=0, std=1, seed=0):
    """
    Generate a noisy random polynomial regression problem and split it into
    train/test folds.

    Parameters
    ----------
    n_ex : int
        Number of examples to generate.
    n_in : int
        Number of input dimensions per example.
    n_out : int
        Unused; retained for interface compatibility.
    d : int
        Number of polynomial coefficients (the highest power is ``d - 1``).
        Default is 3.
    intercept : float
        Constant term of the polynomial. Default is 0.
    std : float
        Standard deviation of the additive Gaussian noise. Default is 1.
    seed : int
        Random state for the train/test split (coefficients and inputs use
        the global NumPy RNG). Default is 0.

    Returns
    -------
    X_train, y_train, X_test, y_test, coef
    """
    coef = np.random.uniform(0, 50, size=d)
    coef[-1] = intercept

    X = np.random.uniform(-100, 100, size=(n_ex, n_in))
    y = np.array([np.polyval(coef, x) + np.random.normal(0, std) for x in X])

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=seed
    )
    return X_train, y_train, X_test, y_test, coef
def plot_regression():
    """
    Compare OLS against polynomial- and RBF-kernel regression on a 4x4 grid
    of random polynomial regression problems; saves to ``img/kr_plots.png``.
    """
    np.random.seed(12345)
    fig, axes = plt.subplots(4, 4)
    for i, ax in enumerate(axes.flatten()):
        n_in, n_out = 1, 1
        # NB: the order of these draws is load-bearing for reproducibility
        d = np.random.randint(1, 5)
        n_ex = np.random.randint(5, 500)
        std = np.random.randint(0, 1000)
        intercept = np.random.rand() * np.random.randint(-300, 300)
        X_train, y_train, X_test, y_test, coefs = random_regression_problem(
            n_ex, n_in, n_out, d=d, intercept=intercept, std=std, seed=i
        )

        # ordinary least-squares baseline
        LR = LinearRegression(fit_intercept=True)
        LR.fit(X_train, y_train)
        y_pred = LR.predict(X_test)
        loss = np.mean((y_test.flatten() - y_pred.flatten()) ** 2)

        # exhaustive grid search over polynomial-kernel hyperparameters,
        # keeping the kernel spec with the lowest test MSE
        d = 3
        best_loss = np.inf
        for gamma in np.linspace(1e-10, 1, 100):
            for c0 in np.linspace(-1, 1000, 100):
                kernel = "PolynomialKernel(d={}, gamma={}, c0={})".format(d, gamma, c0)
                KR_poly = KernelRegression(kernel=kernel)
                KR_poly.fit(X_train, y_train)
                y_pred_poly = KR_poly.predict(X_test)
                loss_poly = np.mean((y_test.flatten() - y_pred_poly.flatten()) ** 2)
                if loss_poly <= best_loss:
                    KR_poly_best = kernel
                    best_loss = loss_poly
        print("Best kernel: {} || loss: {:.4f}".format(KR_poly_best, best_loss))

        KR_poly = KernelRegression(kernel=KR_poly_best)
        KR_poly.fit(X_train, y_train)

        KR_rbf = KernelRegression(kernel="RBFKernel(sigma=1)")
        KR_rbf.fit(X_train, y_train)
        y_pred_rbf = KR_rbf.predict(X_test)
        loss_rbf = np.mean((y_test.flatten() - y_pred_rbf.flatten()) ** 2)

        # extend the plotting range 10% past the observed test inputs
        span = max(X_test) - min(X_test)
        xmin = min(X_test) - 0.1 * span
        xmax = max(X_test) + 0.1 * span
        X_plot = np.linspace(xmin, xmax, 100)

        ax.scatter(X_test, y_test, alpha=0.5)
        ax.plot(X_plot, LR.predict(X_plot), label="OLS", alpha=0.5)
        ax.plot(
            X_plot,
            KR_poly.predict(X_plot),
            label="KR (poly kernel, d={})".format(d),
            alpha=0.5,
        )
        ax.plot(X_plot, KR_rbf.predict(X_plot), label="KR (rbf kernel)", alpha=0.5)
        ax.legend()
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])

    plt.tight_layout()
    plt.savefig("img/kr_plots.png", dpi=300)
    plt.close("all")
18,148 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.nonparametric import GPRegression, KNN, KernelRegression
from numpy_ml.linear_models.lm import LinearRegression
from sklearn.model_selection import train_test_split
def random_regression_problem(n_ex, n_in, n_out, d=3, intercept=0, std=1, seed=0):
    """
    Generate a noisy random polynomial regression problem and split it into
    train/test folds.

    Parameters
    ----------
    n_ex : int
        Number of examples to generate.
    n_in : int
        Number of input dimensions per example.
    n_out : int
        Unused; retained for interface compatibility.
    d : int
        Number of polynomial coefficients (the highest power is ``d - 1``).
        Default is 3.
    intercept : float
        Constant term of the polynomial. Default is 0.
    std : float
        Standard deviation of the additive Gaussian noise. Default is 1.
    seed : int
        Random state for the train/test split (coefficients and inputs use
        the global NumPy RNG). Default is 0.

    Returns
    -------
    X_train, y_train, X_test, y_test, coef
    """
    coef = np.random.uniform(0, 50, size=d)
    coef[-1] = intercept

    X = np.random.uniform(-100, 100, size=(n_ex, n_in))
    y = np.array([np.polyval(coef, x) + np.random.normal(0, std) for x in X])

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=seed
    )
    return X_train, y_train, X_test, y_test, coef
def plot_knn():
    """
    Compare OLS with k-nearest-neighbor regression (k = 1, 5, 10) on a 4x4
    grid of random polynomial regression problems; saves to
    ``img/knn_plots.png``.
    """
    np.random.seed(12345)
    fig, axes = plt.subplots(4, 4)
    for i, ax in enumerate(axes.flatten()):
        n_in, n_out = 1, 1
        # NB: the order of these draws is load-bearing for reproducibility
        d = np.random.randint(1, 5)
        n_ex = np.random.randint(5, 500)
        std = np.random.randint(0, 1000)
        intercept = np.random.rand() * np.random.randint(-300, 300)
        X_train, y_train, X_test, y_test, coefs = random_regression_problem(
            n_ex, n_in, n_out, d=d, intercept=intercept, std=std, seed=i
        )

        # ordinary least-squares baseline
        LR = LinearRegression(fit_intercept=True)
        LR.fit(X_train, y_train)
        y_pred = LR.predict(X_test)
        loss = np.mean((y_test.flatten() - y_pred.flatten()) ** 2)

        # KNN regressors for several neighborhood sizes
        models, losses = {}, {}
        for k in (1, 5, 10):
            knn = KNN(k=k, classifier=False, leaf_size=10, weights="uniform")
            knn.fit(X_train, y_train)
            preds = knn.predict(X_test)
            models[k] = knn
            losses[k] = np.mean((y_test.flatten() - preds.flatten()) ** 2)

        # extend the plotting range 10% past the observed test inputs
        span = max(X_test) - min(X_test)
        xmin = min(X_test) - 0.1 * span
        xmax = max(X_test) + 0.1 * span
        X_plot = np.linspace(xmin, xmax, 100)

        ax.scatter(X_test, y_test, alpha=0.5)
        ax.plot(X_plot, LR.predict(X_plot), label="OLS", alpha=0.5)
        for k in (1, 5, 10):
            ax.plot(X_plot, models[k].predict(X_plot), label="KNN (k={})".format(k), alpha=0.5)
        ax.legend()
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])

    plt.tight_layout()
    plt.savefig("img/knn_plots.png", dpi=300)
    plt.close("all")
18,149 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
sns.set_context("paper", font_scale=0.5)
from numpy_ml.nonparametric import GPRegression, KNN, KernelRegression
from numpy_ml.linear_models.lm import LinearRegression
from sklearn.model_selection import train_test_split
def plot_gp():
    """
    Plot GP regression MAP estimates and confidence bands for several values
    of the noise parameter `alpha`; saves to ``img/gp_alpha.png``.
    """
    np.random.seed(12345)
    sns.set_context("paper", font_scale=0.65)

    X_test = np.linspace(-10, 10, 100)
    X_train = np.array([-3, 0, 7, 1, -9])
    y_train = np.sin(X_train)

    fig, axes = plt.subplots(2, 2)
    for ax, alpha in zip(axes.flatten(), [0, 1e-10, 1e-5, 1]):
        gp = GPRegression(kernel="RBFKernel", alpha=alpha)
        gp.fit(X_train, y_train)
        mean, conf = gp.predict(X_test)

        ax.plot(X_train, y_train, "rx", label="observed")
        ax.plot(X_test, np.sin(X_test), label="true fn")
        ax.plot(X_test, mean, "--", label="MAP (alpha={})".format(alpha))
        ax.fill_between(X_test, mean + conf, mean - conf, alpha=0.1)
        ax.set_xticks([])
        ax.set_yticks([])
        sns.despine()
        ax.legend()

    plt.tight_layout()
    plt.savefig("img/gp_alpha.png", dpi=300)
    plt.close("all")
18,150 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
sns.set_context("paper", font_scale=0.5)
from numpy_ml.nonparametric import GPRegression, KNN, KernelRegression
from numpy_ml.linear_models.lm import LinearRegression
from sklearn.model_selection import train_test_split
def plot_gp_dist():
    """Visualize a GP on a toy sine dataset: prior samples, posterior
    predictive samples, and the posterior mean with a confidence band.

    Saves a 1x3 panel figure to ``img/gp_dist.png``.
    """
    np.random.seed(12345)
    sns.set_context("paper", font_scale=0.95)

    X_test = np.linspace(-10, 10, 100)
    X_train = np.array([-3, 0, 7, 1, -9])
    y_train = np.sin(X_train)

    fig, axes = plt.subplots(1, 3)
    G = GPRegression(kernel="RBFKernel", alpha=0)  # alpha=0: noiseless GP
    G.fit(X_train, y_train)

    y_pred_prior = G.sample(X_test, 3, "prior")
    y_pred_posterior = G.sample(X_test, 3, "posterior_predictive")

    for prior_sample in y_pred_prior:
        axes[0].plot(X_test, prior_sample.ravel(), lw=1)
    axes[0].set_title("Prior samples")
    axes[0].set_xticks([])
    axes[0].set_yticks([])

    for post_sample in y_pred_posterior:
        axes[1].plot(X_test, post_sample.ravel(), lw=1)
    axes[1].plot(X_train, y_train, "ko", ms=1.2)
    axes[1].set_title("Posterior samples")
    axes[1].set_xticks([])
    axes[1].set_yticks([])

    y_pred, conf = G.predict(X_test)

    axes[2].plot(X_test, np.sin(X_test), lw=1, label="true function")
    axes[2].plot(X_test, y_pred, lw=1, label="MAP estimate")
    axes[2].fill_between(X_test, y_pred + conf, y_pred - conf, alpha=0.1)
    axes[2].plot(X_train, y_train, "ko", ms=1.2, label="observed")
    axes[2].legend(fontsize="x-small")
    axes[2].set_title("Posterior mean")
    axes[2].set_xticks([])
    axes[2].set_yticks([])

    fig.set_size_inches(6, 2)
    plt.tight_layout()
    plt.savefig("img/gp_dist.png", dpi=300)
    plt.close("all")
18,151 | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets.samples_generator import make_blobs
from sklearn.linear_model import LogisticRegression as LogisticRegression_sk
from sklearn.datasets import make_regression
from sklearn.metrics import zero_one_loss, r2_score
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.linear_models import (
RidgeRegression,
LinearRegression,
BayesianLinearRegressionKnownVariance,
BayesianLinearRegressionUnknownVariance,
LogisticRegression,
)
def random_binary_tensor(shape, sparsity=0.5):
    """Sample a random 0/1 tensor of the given shape.

    Each entry is 1.0 with probability ``sparsity`` and 0.0 otherwise
    (``sparsity=1`` yields all ones, ``sparsity=0`` all zeros).
    """
    mask = np.random.random_sample(shape) >= (1 - sparsity)
    return mask.astype(float)
18,152 | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets.samples_generator import make_blobs
from sklearn.linear_model import LogisticRegression as LogisticRegression_sk
from sklearn.datasets import make_regression
from sklearn.metrics import zero_one_loss, r2_score
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.linear_models import (
RidgeRegression,
LinearRegression,
BayesianLinearRegressionKnownVariance,
BayesianLinearRegressionUnknownVariance,
LogisticRegression,
)
def random_classification_problem(n_ex, n_classes, n_in, seed=0):
    """Sample a Gaussian-blob classification task and return a 70/30 split.

    Returns ``(X_train, y_train, X_test, y_test)``.
    """
    features, labels = make_blobs(
        n_samples=n_ex, centers=n_classes, n_features=n_in, random_state=seed
    )
    tr_X, te_X, tr_y, te_y = train_test_split(
        features, labels, test_size=0.3, random_state=seed
    )
    return tr_X, tr_y, te_X, te_y
def plot_logistic():
    """Fit this package's LogisticRegression and sklearn's on 16 random 1D
    binary-classification problems and plot both decision curves together
    with their percent 0-1 test losses. Saves the 4x4 grid to
    ``plot_logistic.png``.
    """
    np.random.seed(12345)
    fig, axes = plt.subplots(4, 4)
    for i, ax in enumerate(axes.flatten()):
        n_in = 1
        n_ex = 150
        X_train, y_train, X_test, y_test = random_classification_problem(
            n_ex, n_classes=2, n_in=n_in, seed=i
        )
        LR = LogisticRegression(penalty="l2", gamma=0.2, fit_intercept=True)
        LR.fit(X_train, y_train, lr=0.1, tol=1e-7, max_iter=1e7)
        y_pred = (LR.predict(X_test) >= 0.5) * 1.0  # threshold probabilities at 0.5
        loss = zero_one_loss(y_test, y_pred) * 100.0  # percent misclassified

        # reference fit from scikit-learn on the same data
        LR_sk = LogisticRegression_sk(
            penalty="l2", tol=0.0001, C=0.8, fit_intercept=True, random_state=i
        )
        LR_sk.fit(X_train, y_train)
        y_pred_sk = (LR_sk.predict(X_test) >= 0.5) * 1.0
        loss_sk = zero_one_loss(y_test, y_pred_sk) * 100.0

        # plotting grid padded 10% beyond the test-point range
        xmin = min(X_test) - 0.1 * (max(X_test) - min(X_test))
        xmax = max(X_test) + 0.1 * (max(X_test) - min(X_test))
        X_plot = np.linspace(xmin, xmax, 100)
        y_plot = LR.predict(X_plot)
        y_plot_sk = LR_sk.predict_proba(X_plot.reshape(-1, 1))[:, 1]

        ax.scatter(X_test[y_pred == 0], y_test[y_pred == 0], alpha=0.5)
        ax.scatter(X_test[y_pred == 1], y_test[y_pred == 1], alpha=0.5)
        ax.plot(X_plot, y_plot, label="mine", alpha=0.75)
        ax.plot(X_plot, y_plot_sk, label="sklearn", alpha=0.75)
        ax.legend()
        ax.set_title("Loss mine: {:.2f} Loss sklearn: {:.2f}".format(loss, loss_sk))

        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])
    plt.tight_layout()
    plt.savefig("plot_logistic.png", dpi=300)
    plt.close("all")
18,153 | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets.samples_generator import make_blobs
from sklearn.linear_model import LogisticRegression as LogisticRegression_sk
from sklearn.datasets import make_regression
from sklearn.metrics import zero_one_loss, r2_score
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.linear_models import (
RidgeRegression,
LinearRegression,
BayesianLinearRegressionKnownVariance,
BayesianLinearRegressionUnknownVariance,
LogisticRegression,
)
def random_regression_problem(n_ex, n_in, n_out, intercept=0, std=1, seed=0):
    """Sample a noisy linear regression task and return a 70/30 split.

    Bug fix: this ``def`` previously had an empty body (a syntax error --
    the next statement was another ``def``). Filled in to match the
    identical helper defined later in this file, which ``plot_bayes``
    below depends on.

    Parameters
    ----------
    n_ex : int
        Number of examples to generate.
    n_in : int
        Number of input features.
    n_out : int
        Number of regression targets.
    intercept : float
        Bias term of the underlying linear model. Default is 0.
    std : float
        Standard deviation of the Gaussian noise. Default is 1.
    seed : int
        Random state for data generation and the split. Default is 0.

    Returns
    -------
    (X_train, y_train, X_test, y_test, coef) where ``coef`` holds the true
    coefficients of the generating linear model.
    """
    X, y, coef = make_regression(
        n_samples=n_ex,
        n_features=n_in,
        n_targets=n_out,
        bias=intercept,
        noise=std,
        coef=True,
        random_state=seed,
    )
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=seed
    )
    return X_train, y_train, X_test, y_test, coef
def plot_bayes():
    """Compare MLE linear regression, ridge regression, and Bayesian linear
    regression (known / unknown variance) on a 1D problem with two injected
    outliers. Saves the 4-panel comparison to ``plot_bayes.png``.
    """
    np.random.seed(12345)
    n_in = 1
    n_out = 1
    n_ex = 20
    std = 15
    intercept = 10
    X_train, y_train, X_test, y_test, coefs = random_regression_problem(
        n_ex, n_in, n_out, intercept=intercept, std=std, seed=0
    )

    # add some outliers
    x1, x2 = X_train[0] + 0.5, X_train[6] - 0.3
    y1 = np.dot(x1, coefs) + intercept + 25
    y2 = np.dot(x2, coefs) + intercept - 31

    X_train = np.vstack([X_train, np.array([x1, x2])])
    y_train = np.hstack([y_train, [y1[0], y2[0]]])

    # ordinary least squares (MLE) fit
    LR = LinearRegression(fit_intercept=True)
    LR.fit(X_train, y_train)
    y_pred = LR.predict(X_test)
    loss = np.mean((y_test - y_pred) ** 2)

    # L2-regularized fit
    ridge = RidgeRegression(alpha=1, fit_intercept=True)
    ridge.fit(X_train, y_train)
    y_pred = ridge.predict(X_test)
    loss_ridge = np.mean((y_test - y_pred) ** 2)

    # Bayesian fit, observation variance assumed known
    LR_var = BayesianLinearRegressionKnownVariance(
        mu=np.c_[intercept, coefs][0], sigma=np.sqrt(std), V=None, fit_intercept=True,
    )
    LR_var.fit(X_train, y_train)
    y_pred_var = LR_var.predict(X_test)
    loss_var = np.mean((y_test - y_pred_var) ** 2)

    # Bayesian fit, observation variance treated as unknown
    LR_novar = BayesianLinearRegressionUnknownVariance(
        alpha=1, beta=2, mu=np.c_[intercept, coefs][0], V=None, fit_intercept=True
    )
    LR_novar.fit(X_train, y_train)
    y_pred_novar = LR_novar.predict(X_test)
    loss_novar = np.mean((y_test - y_pred_novar) ** 2)

    # evaluation grid padded 10% beyond the test-point range
    xmin = min(X_test) - 0.1 * (max(X_test) - min(X_test))
    xmax = max(X_test) + 0.1 * (max(X_test) - min(X_test))
    X_plot = np.linspace(xmin, xmax, 100)
    y_plot = LR.predict(X_plot)
    y_plot_ridge = ridge.predict(X_plot)
    y_plot_var = LR_var.predict(X_plot)
    y_plot_novar = LR_novar.predict(X_plot)
    y_true = [np.dot(x, coefs) + intercept for x in X_plot]

    fig, axes = plt.subplots(1, 4)
    axes = axes.flatten()

    axes[0].scatter(X_test, y_test)
    axes[0].plot(X_plot, y_plot, label="MLE")
    axes[0].plot(X_plot, y_true, label="True fn")
    axes[0].set_title("Linear Regression\nMLE Test MSE: {:.2f}".format(loss))
    axes[0].legend()
    # axes[0].fill_between(X_plot, y_plot - error, y_plot + error)

    axes[1].scatter(X_test, y_test)
    axes[1].plot(X_plot, y_plot_ridge, label="MLE")
    axes[1].plot(X_plot, y_true, label="True fn")
    axes[1].set_title(
        "Ridge Regression (alpha=1)\nMLE Test MSE: {:.2f}".format(loss_ridge)
    )
    axes[1].legend()

    # overlay 200 regression lines sampled from the coefficient posterior
    axes[2].plot(X_plot, y_plot_var, label="MAP")
    mu, cov = LR_var.posterior["b"].mean, LR_var.posterior["b"].cov
    for k in range(200):
        b_samp = np.random.multivariate_normal(mu, cov)
        y_samp = [np.dot(x, b_samp[1]) + b_samp[0] for x in X_plot]
        axes[2].plot(X_plot, y_samp, alpha=0.05)
    axes[2].scatter(X_test, y_test)
    axes[2].plot(X_plot, y_true, label="True fn")
    axes[2].legend()
    axes[2].set_title(
        "Bayesian Regression (known variance)\nMAP Test MSE: {:.2f}".format(loss_var)
    )

    axes[3].plot(X_plot, y_plot_novar, label="MAP")
    mu = LR_novar.posterior["b | sigma**2"].mean
    cov = LR_novar.posterior["b | sigma**2"].cov
    for k in range(200):
        b_samp = np.random.multivariate_normal(mu, cov)
        y_samp = [np.dot(x, b_samp[1]) + b_samp[0] for x in X_plot]
        axes[3].plot(X_plot, y_samp, alpha=0.05)
    axes[3].scatter(X_test, y_test)
    axes[3].plot(X_plot, y_true, label="True fn")
    axes[3].legend()
    axes[3].set_title(
        "Bayesian Regression (unknown variance)\nMAP Test MSE: {:.2f}".format(
            loss_novar
        )
    )

    for ax in axes:
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])

    fig.set_size_inches(7.5, 1.875)
    plt.savefig("plot_bayes.png", dpi=300)
    plt.close("all")
18,154 | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets.samples_generator import make_blobs
from sklearn.linear_model import LogisticRegression as LogisticRegression_sk
from sklearn.datasets import make_regression
from sklearn.metrics import zero_one_loss, r2_score
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.linear_models import (
RidgeRegression,
LinearRegression,
BayesianLinearRegressionKnownVariance,
BayesianLinearRegressionUnknownVariance,
LogisticRegression,
)
def random_regression_problem(n_ex, n_in, n_out, intercept=0, std=1, seed=0):
    """Sample a noisy linear regression task and return a 70/30 split plus
    the true generating coefficients:
    ``(X_train, y_train, X_test, y_test, coef)``.
    """
    features, targets, true_coef = make_regression(
        n_samples=n_ex,
        n_features=n_in,
        n_targets=n_out,
        bias=intercept,
        noise=std,
        coef=True,
        random_state=seed,
    )
    tr_X, te_X, tr_y, te_y = train_test_split(
        features, targets, test_size=0.3, random_state=seed
    )
    return tr_X, tr_y, te_X, te_y, true_coef
def plot_regression():
    """Compare MLE linear regression against Bayesian linear regression with
    known and unknown variance on 16 random 1D problems. Saves the grid of
    fits (titled with test MSEs) to ``plot_regression.png``.
    """
    np.random.seed(12345)
    fig, axes = plt.subplots(4, 4)
    for i, ax in enumerate(axes.flatten()):
        n_in = 1
        n_out = 1
        n_ex = 50
        std = np.random.randint(0, 100)  # random noise scale per panel
        intercept = np.random.rand() * np.random.randint(-300, 300)
        X_train, y_train, X_test, y_test, coefs = random_regression_problem(
            n_ex, n_in, n_out, intercept=intercept, std=std, seed=i
        )

        # maximum-likelihood fit
        LR = LinearRegression(fit_intercept=True)
        LR.fit(X_train, y_train)
        y_pred = LR.predict(X_test)
        loss = np.mean((y_test - y_pred) ** 2)
        r2 = r2_score(y_test, y_pred)

        # Bayesian fit, observation variance assumed known
        LR_var = BayesianLinearRegressionKnownVariance(
            mu=np.c_[intercept, coefs][0],
            sigma=np.sqrt(std),
            V=None,
            fit_intercept=True,
        )
        LR_var.fit(X_train, y_train)
        y_pred_var = LR_var.predict(X_test)
        loss_var = np.mean((y_test - y_pred_var) ** 2)
        r2_var = r2_score(y_test, y_pred_var)

        # Bayesian fit, observation variance treated as unknown
        LR_novar = BayesianLinearRegressionUnknownVariance(
            alpha=1, beta=2, mu=np.c_[intercept, coefs][0], V=None, fit_intercept=True,
        )
        LR_novar.fit(X_train, y_train)
        y_pred_novar = LR_novar.predict(X_test)
        loss_novar = np.mean((y_test - y_pred_novar) ** 2)
        r2_novar = r2_score(y_test, y_pred_novar)

        # plotting grid padded 10% beyond the test-point range
        xmin = min(X_test) - 0.1 * (max(X_test) - min(X_test))
        xmax = max(X_test) + 0.1 * (max(X_test) - min(X_test))
        X_plot = np.linspace(xmin, xmax, 100)
        y_plot = LR.predict(X_plot)
        y_plot_var = LR_var.predict(X_plot)
        y_plot_novar = LR_novar.predict(X_plot)

        ax.scatter(X_test, y_test, marker="x", alpha=0.5)
        ax.plot(X_plot, y_plot, label="linear regression", alpha=0.5)
        ax.plot(X_plot, y_plot_var, label="Bayes (w var)", alpha=0.5)
        ax.plot(X_plot, y_plot_novar, label="Bayes (no var)", alpha=0.5)
        ax.legend()
        ax.set_title(
            "MSE\nLR: {:.2f} Bayes (w var): {:.2f}\nBayes (no var): {:.2f}".format(
                loss, loss_var, loss_novar
            )
        )

        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])
    plt.tight_layout()
    plt.savefig("plot_regression.png", dpi=300)
    plt.close("all")
18,155 | import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.neural_nets.schedulers import (
ConstantScheduler,
ExponentialScheduler,
NoamScheduler,
KingScheduler,
)
def king_loss_fn(x):
    """Piecewise-linear toy loss curve used to exercise the King scheduler:
    a descending ramp until step 250, a plateau to step 600, a second ramp
    to step 700, and a small constant floor afterwards.
    """
    if x > 700:
        return 0.003726653172066108
    if x > 600:
        return 140.00372665317207 - 0.2 * x
    if x > 250:
        return 20.00372665317208
    return 82.50372665317208 - 0.25 * x
def plot_schedulers():
    """Plot example learning-rate trajectories for the Constant, Exponential,
    Noam, and King schedulers over 1000 steps and save the 2x2 figure to
    ``plot.png``. The King panel also shows the (rescaled) toy loss curve
    that drives its decay decisions.
    """
    fig, axes = plt.subplots(2, 2)
    # one group of (scheduler, legend-label) pairs per subplot
    schedulers = [
        (
            [ConstantScheduler(lr=0.01), "lr=1e-2"],
            [ConstantScheduler(lr=0.008), "lr=8e-3"],
            [ConstantScheduler(lr=0.006), "lr=6e-3"],
            [ConstantScheduler(lr=0.004), "lr=4e-3"],
            [ConstantScheduler(lr=0.002), "lr=2e-3"],
        ),
        (
            [
                ExponentialScheduler(
                    lr=0.01, stage_length=250, staircase=False, decay=0.4
                ),
                "lr=0.01, stage=250, stair=False, decay=0.4",
            ],
            [
                ExponentialScheduler(
                    lr=0.01, stage_length=250, staircase=True, decay=0.4
                ),
                "lr=0.01, stage=250, stair=True, decay=0.4",
            ],
            [
                ExponentialScheduler(
                    lr=0.01, stage_length=125, staircase=True, decay=0.1
                ),
                "lr=0.01, stage=125, stair=True, decay=0.1",
            ],
            [
                ExponentialScheduler(
                    lr=0.001, stage_length=250, staircase=False, decay=0.1
                ),
                "lr=0.001, stage=250, stair=False, decay=0.1",
            ],
            [
                ExponentialScheduler(
                    lr=0.001, stage_length=125, staircase=False, decay=0.8
                ),
                "lr=0.001, stage=125, stair=False, decay=0.8",
            ],
            [
                ExponentialScheduler(
                    lr=0.01, stage_length=250, staircase=False, decay=0.01
                ),
                "lr=0.01, stage=250, stair=False, decay=0.01",
            ],
        ),
        (
            [
                NoamScheduler(model_dim=512, scale_factor=1, warmup_steps=250),
                "dim=512, scale=1, warmup=250",
            ],
            [
                NoamScheduler(model_dim=256, scale_factor=1, warmup_steps=250),
                "dim=256, scale=1, warmup=250",
            ],
            [
                NoamScheduler(model_dim=512, scale_factor=1, warmup_steps=500),
                "dim=512, scale=1, warmup=500",
            ],
            [
                NoamScheduler(model_dim=256, scale_factor=1, warmup_steps=500),
                "dim=512, scale=1, warmup=500",
            ],
            [
                NoamScheduler(model_dim=512, scale_factor=2, warmup_steps=500),
                "dim=512, scale=2, warmup=500",
            ],
            [
                NoamScheduler(model_dim=512, scale_factor=0.5, warmup_steps=500),
                "dim=512, scale=0.5, warmup=500",
            ],
        ),
        (
            # [
            #     KingScheduler(initial_lr=0.01, patience=100, decay=0.1),
            #     "lr=0.01, patience=100, decay=0.8",
            # ],
            # [
            #     KingScheduler(initial_lr=0.01, patience=300, decay=0.999),
            #     "lr=0.01, patience=300, decay=0.999",
            # ],
            [
                KingScheduler(initial_lr=0.009, patience=150, decay=0.995),
                "lr=0.009, patience=150, decay=0.9999",
            ],
            [
                KingScheduler(initial_lr=0.008, patience=100, decay=0.995),
                "lr=0.008, patience=100, decay=0.995",
            ],
            [
                KingScheduler(initial_lr=0.007, patience=50, decay=0.995),
                "lr=0.007, patience=50, decay=0.995",
            ],
            [
                KingScheduler(initial_lr=0.005, patience=25, decay=0.9),
                "lr=0.005, patience=25, decay=0.99",
            ],
        ),
    ]

    for ax, schs, title in zip(
        axes.flatten(), schedulers, ["Constant", "Exponential", "Noam", "King"]
    ):
        t0 = time.time()
        print("Running {} scheduler".format(title))
        X = np.arange(1, 1000)
        loss = np.array([king_loss_fn(x) for x in X])

        # scale loss to fit on same axis as lr
        scale = 0.01 / loss[0]
        loss *= scale

        if title == "King":
            ax.plot(X, loss, ls=":", label="Loss")

        for sc, lg in schs:
            # evaluate the scheduler at each step (King also consumes the loss)
            Y = np.array([sc(x, ll) for x, ll in zip(X, loss)])
            ax.plot(X, Y, label=lg, alpha=0.6)

        ax.legend(fontsize=5)
        ax.set_xlabel("Steps")
        ax.set_ylabel("Learning rate")
        ax.set_title("{} scheduler".format(title))
        print(
            "Finished plotting {} runs of {} in {:.2f}s".format(
                len(schs), title, time.time() - t0
            )
        )
    plt.tight_layout()
    plt.savefig("plot.png", dpi=300)
    plt.close("all")
18,156 | import numpy as np
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.datasets import make_blobs, make_regression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.trees import GradientBoostedDecisionTree, DecisionTree, RandomForest
def plot():
    """Fit a RandomForest, DecisionTree, and GradientBoostedDecisionTree on 16
    randomly-generated classification or regression problems and visualize
    their predictions / test losses. Saves the 4x4 grid to ``plot.png``.
    """
    fig, axes = plt.subplots(4, 4)
    fig.set_size_inches(10, 10)
    for ax in axes.flatten():
        # random problem size / model capacity for this panel
        n_ex = 100
        n_trees = 50
        n_feats = np.random.randint(2, 100)
        max_depth_d = np.random.randint(1, 100)
        max_depth_r = np.random.randint(1, 10)

        classifier = np.random.choice([True, False])
        if classifier:
            # create classification problem
            n_classes = np.random.randint(2, 10)
            X, Y = make_blobs(n_samples=n_ex, centers=n_classes, n_features=2)
            X, X_test, Y, Y_test = train_test_split(X, Y, test_size=0.3)
            n_feats = min(n_feats, X.shape[1])

            # initialize model
            def loss(yp, y):
                # NOTE: despite the name, this is accuracy (higher is better)
                return accuracy_score(yp, y)

            # initialize model
            criterion = np.random.choice(["entropy", "gini"])
            mine = RandomForest(
                classifier=classifier,
                n_feats=n_feats,
                n_trees=n_trees,
                criterion=criterion,
                max_depth=max_depth_r,
            )
            mine_d = DecisionTree(
                criterion=criterion, max_depth=max_depth_d, classifier=classifier
            )
            mine_g = GradientBoostedDecisionTree(
                n_trees=n_trees,
                max_depth=max_depth_d,
                classifier=classifier,
                learning_rate=1,
                loss="crossentropy",
                step_size="constant",
                split_criterion=criterion,
            )
        else:
            # create regeression problem
            X, Y = make_regression(n_samples=n_ex, n_features=1)
            X, X_test, Y, Y_test = train_test_split(X, Y, test_size=0.3)
            n_feats = min(n_feats, X.shape[1])

            # initialize model
            criterion = "mse"
            loss = mean_squared_error
            mine = RandomForest(
                criterion=criterion,
                n_feats=n_feats,
                n_trees=n_trees,
                max_depth=max_depth_r,
                classifier=classifier,
            )
            mine_d = DecisionTree(
                criterion=criterion, max_depth=max_depth_d, classifier=classifier
            )
            mine_g = GradientBoostedDecisionTree(
                n_trees=n_trees,
                max_depth=max_depth_d,
                classifier=classifier,
                learning_rate=1,
                loss="mse",
                step_size="adaptive",
                split_criterion=criterion,
            )

        # fit 'em
        mine.fit(X, Y)
        mine_d.fit(X, Y)
        mine_g.fit(X, Y)

        # get preds on test set
        y_pred_mine_test = mine.predict(X_test)
        y_pred_mine_test_d = mine_d.predict(X_test)
        y_pred_mine_test_g = mine_g.predict(X_test)

        loss_mine_test = loss(y_pred_mine_test, Y_test)
        loss_mine_test_d = loss(y_pred_mine_test_d, Y_test)
        loss_mine_test_g = loss(y_pred_mine_test_g, Y_test)

        if classifier:
            entries = [
                ("RF", loss_mine_test, y_pred_mine_test),
                ("DT", loss_mine_test_d, y_pred_mine_test_d),
                ("GB", loss_mine_test_g, y_pred_mine_test_g),
            ]
            # scatter the test points colored by one randomly-chosen model's preds
            (lbl, test_loss, preds) = entries[np.random.randint(3)]
            ax.set_title("{} Accuracy: {:.2f}%".format(lbl, test_loss * 100))
            for i in np.unique(Y_test):
                ax.scatter(
                    X_test[preds == i, 0].flatten(),
                    X_test[preds == i, 1].flatten(),
                    # s=0.5,
                )
        else:
            # plot each model's regression curve over a dense 1D grid
            X_ax = np.linspace(
                np.min(X_test.flatten()) - 1, np.max(X_test.flatten()) + 1, 100
            ).reshape(-1, 1)
            y_pred_mine_test = mine.predict(X_ax)
            y_pred_mine_test_d = mine_d.predict(X_ax)
            y_pred_mine_test_g = mine_g.predict(X_ax)

            ax.scatter(X_test.flatten(), Y_test.flatten(), c="b", alpha=0.5)
            # s=0.5)
            # NOTE(review): the .format() calls on the label strings below have
            # no placeholders, so their arguments are silently ignored
            ax.plot(
                X_ax.flatten(),
                y_pred_mine_test_g.flatten(),
                # linewidth=0.5,
                label="GB".format(n_trees, n_feats, max_depth_d),
                color="red",
            )
            ax.plot(
                X_ax.flatten(),
                y_pred_mine_test.flatten(),
                # linewidth=0.5,
                label="RF".format(n_trees, n_feats, max_depth_r),
                color="cornflowerblue",
            )
            ax.plot(
                X_ax.flatten(),
                y_pred_mine_test_d.flatten(),
                # linewidth=0.5,
                label="DT".format(max_depth_d),
                color="yellowgreen",
            )
            ax.set_title(
                "GB: {:.1f} / RF: {:.1f} / DT: {:.1f} ".format(
                    loss_mine_test_g, loss_mine_test, loss_mine_test_d
                )
            )
        ax.legend()

        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])
    plt.savefig("plot.png", dpi=300)
    plt.close("all")
18,157 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.ngram import MLENGram, AdditiveNGram, GoodTuringNGram
def plot_count_models(GT, N):
    """Plot the empirical count-of-counts for the trained Good-Turing model
    `GT` (order `N`) against its fitted count model, saving to ``test.png``.
    """
    count_of_counts = GT._num_grams_with_count
    fit_model = GT._count_models[N]
    r_max = max(GT.counts[N].values())

    empirical = [count_of_counts(r + 1, N) for r in range(r_max)]
    predicted = [
        np.exp(fit_model.predict(np.array([r + 1]))) for r in range(r_max + 10)
    ]

    plt.scatter(range(r_max), empirical, c="r", label="actual")
    plt.plot(range(r_max + 10), predicted, "-", label="model")
    plt.ylim([-1, 100])
    plt.xlabel("Count ($r$)")
    plt.ylabel("Count-of-counts ($N_r$)")
    plt.legend()
    plt.savefig("test.png")
    plt.close()
18,158 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.ngram import MLENGram, AdditiveNGram, GoodTuringNGram
def compare_probs(fp, N):
    """Plot additive-smoothing log-probabilities for one seen and one unseen
    N-gram as a function of the pseudocount K, against the MLE baseline.
    Trains each model on the corpus file `fp`; saves ``img/add_smooth.png``.
    """
    MLE = MLENGram(N, unk=False, filter_punctuation=False, filter_stopwords=False)
    MLE.train(fp, encoding="utf-8-sig")

    add_y, mle_y, gtt_y = [], [], []
    addu_y, mleu_y, gttu_y = [], [], []

    seen = ("<bol>", "the")     # ngram expected to occur in the corpus
    unseen = ("<bol>", "asdf")  # ngram expected to be absent

    GTT = GoodTuringNGram(
        N, conf=1.96, unk=False, filter_stopwords=False, filter_punctuation=False
    )
    GTT.train(fp, encoding="utf-8-sig")

    # Good-Turing estimates do not depend on K, so compute them once
    gtt_prob = GTT.log_prob(seen, N)
    gtt_prob_u = GTT.log_prob(unseen, N)

    for K in np.linspace(0, 10, 20):
        ADD = AdditiveNGram(
            N, K, unk=False, filter_punctuation=False, filter_stopwords=False
        )
        ADD.train(fp, encoding="utf-8-sig")

        add_prob = ADD.log_prob(seen, N)
        mle_prob = MLE.log_prob(seen, N)

        add_y.append(add_prob)
        mle_y.append(mle_prob)
        gtt_y.append(gtt_prob)

        mle_prob_u = MLE.log_prob(unseen, N)
        add_prob_u = ADD.log_prob(unseen, N)

        addu_y.append(add_prob_u)
        mleu_y.append(mle_prob_u)
        gttu_y.append(gtt_prob_u)

    plt.plot(np.linspace(0, 10, 20), add_y, label="Additive (seen ngram)")
    plt.plot(np.linspace(0, 10, 20), addu_y, label="Additive (unseen ngram)")
    # plt.plot(np.linspace(0, 10, 20), gtt_y, label="Good-Turing (seen ngram)")
    # plt.plot(np.linspace(0, 10, 20), gttu_y, label="Good-Turing (unseen ngram)")
    plt.plot(np.linspace(0, 10, 20), mle_y, "--", label="MLE (seen ngram)")
    plt.xlabel("K")
    plt.ylabel("log P(sequence)")
    plt.legend()
    plt.savefig("img/add_smooth.png")
    plt.close("all")
18,159 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from numpy_ml.ngram import MLENGram, AdditiveNGram, GoodTuringNGram
The code above provides the dependencies required to implement the `plot_gt_freqs` function. Write a Python function `def plot_gt_freqs(fp)` that solves the following problem:
Draw a scatterplot of the empirical frequencies of the counted species versus their Simple Good-Turing smoothed values, in rank order. Depends on pylab and matplotlib.
The function is given below:
def plot_gt_freqs(fp):
    """
    Draws a scatterplot of the empirical frequencies of the counted species
    versus their Simple Good Turing smoothed values, in rank order. Depends on
    pylab and matplotlib.
    """
    mle_model = MLENGram(1, filter_punctuation=False, filter_stopwords=False)
    mle_model.train(fp, encoding="utf-8-sig")
    counts = dict(mle_model.counts[1])

    gt_model = GoodTuringNGram(1, filter_stopwords=False, filter_punctuation=False)
    gt_model.train(fp, encoding="utf-8-sig")

    add_model = AdditiveNGram(1, 1, filter_punctuation=False, filter_stopwords=False)
    add_model.train(fp, encoding="utf-8-sig")

    total = float(sum(counts.values()))
    freqs = {token: cnt / total for token, cnt in counts.items()}
    sgt_probs = {tok: np.exp(gt_model.log_prob(tok, 1)) for tok in counts}
    as_probs = {tok: np.exp(add_model.log_prob(tok, 1)) for tok in counts}

    # one rank-ordered log-log series per estimator
    for probs, style, lbl in (
        (freqs, "k+", "MLE"),
        (sgt_probs, "r+", "simple Good-Turing"),
        (as_probs, "b+", "Laplace smoothing"),
    ):
        ranks = np.arange(len(probs))
        vals = sorted(probs.values(), reverse=True)
        plt.loglog(ranks, vals, style, alpha=0.25, label=lbl)

    plt.xlabel("Rank")
    plt.ylabel("Probability")
    plt.legend()
    plt.tight_layout()
    plt.savefig("img/rank_probs.png")
    plt.close("all")
18,160 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
sns.set_context("paper", font_scale=1)
from numpy_ml.lda import LDA
def generate_corpus():
    """Generate a synthetic LDA corpus of 300 documents over a 30-word
    vocabulary drawn from 10 topics with three document "types".

    Returns
    -------
    (corpus, T) : corpus is a list of word-id arrays (one per document);
    T is the number of topics.
    """
    # Generate some fake data
    D = 300  # number of documents
    T = 10   # number of topics
    V = 30   # vocabulary size
    N = np.random.randint(150, 200, size=D)  # words per document

    # Create a document-topic distribution for 3 different types of documents
    alpha1 = np.array((20, 15, 10, 1, 1, 1, 1, 1, 1, 1))
    alpha2 = np.array((1, 1, 1, 10, 15, 20, 1, 1, 1, 1))
    alpha3 = np.array((1, 1, 1, 1, 1, 1, 10, 12, 15, 18))

    # Arbitrarily choose each topic to have 3 very common, diagnostic words
    # These words are barely shared with any other topic
    beta_probs = (
        np.ones((V, T)) + np.array([np.arange(V) % T == t for t in range(T)]).T * 19
    )
    beta_gen = np.array(list(map(lambda x: np.random.dirichlet(x), beta_probs.T))).T

    corpus = []
    theta = np.empty((D, T))

    # Generate each document from the LDA model
    for d in range(D):
        # Draw topic distribution for the document
        if d < (D / 3):
            theta[d, :] = np.random.dirichlet(alpha1, 1)[0]
        elif d < 2 * (D / 3):
            theta[d, :] = np.random.dirichlet(alpha2, 1)[0]
        else:
            theta[d, :] = np.random.dirichlet(alpha3, 1)[0]

        doc = np.array([])
        for n in range(N[d]):
            # Draw a topic according to the document's topic distribution
            z_n = np.random.choice(np.arange(T), p=theta[d, :])
            # Draw a word according to the topic-word distribution
            w_n = np.random.choice(np.arange(V), p=beta_gen[:, z_n])
            doc = np.append(doc, w_n)
        corpus.append(doc)
    return corpus, T
def plot_unsmoothed():
    """Train unsmoothed LDA on a synthetic corpus and save heatmaps of the
    recovered topic-word and document-topic distributions to
    ``img/plot_unsmoothed.png``.
    """
    corpus, T = generate_corpus()
    lda_model = LDA(T)
    lda_model.train(corpus, verbose=False)

    fig, axes = plt.subplots(1, 2)

    word_ax = sns.heatmap(lda_model.beta, xticklabels=[], yticklabels=[], ax=axes[0])
    word_ax.set_xlabel("Topics")
    word_ax.set_ylabel("Words")
    word_ax.set_title("Recovered topic-word distribution")

    doc_ax = sns.heatmap(lda_model.gamma, xticklabels=[], yticklabels=[], ax=axes[1])
    doc_ax.set_xlabel("Topics")
    doc_ax.set_ylabel("Documents")
    doc_ax.set_title("Recovered document-topic distribution")

    plt.savefig("img/plot_unsmoothed.png", dpi=300)
    plt.close("all")
18,161 | from collections import namedtuple
import numpy as np
from numpy_ml.bandits import (
MultinomialBandit,
BernoulliBandit,
ShortestPathBandit,
ContextualLinearBandit,
)
from numpy_ml.bandits.trainer import BanditTrainer
from numpy_ml.bandits.policies import (
EpsilonGreedy,
UCB1,
ThompsonSamplingBetaBinomial,
LinUCB,
)
from numpy_ml.utils.graphs import random_DAG, DiGraph, Edge
def random_multinomial_mab(n_arms=10, n_choices_per_arm=5, reward_range=(0, 1)):
    """Generate a random multinomial multi-armed bandit environment.

    Fixes: the ``reward_range`` default was a mutable list (``[0, 1]``) --
    replaced with an immutable tuple; callers passing a list are unaffected.

    Parameters
    ----------
    n_arms : int
        Number of arms in the bandit. Default is 10.
    n_choices_per_arm : int
        Number of distinct payoffs each arm can emit. Default is 5.
    reward_range : sequence of two numbers
        The ``(low, high)`` interval payoffs are drawn from uniformly.
        Default is ``(0, 1)``.

    Returns
    -------
    A :class:`MultinomialBandit` with random payoffs and payoff probabilities.
    """
    lo, hi = reward_range
    payoffs = []
    payoff_probs = []
    for _ in range(n_arms):
        # random categorical distribution over this arm's payoffs
        p = np.random.uniform(size=n_choices_per_arm)
        p = p / p.sum()
        r = np.random.uniform(low=lo, high=hi, size=n_choices_per_arm)

        payoffs.append(list(r))
        payoff_probs.append(list(p))
    return MultinomialBandit(payoffs, payoff_probs)
class BanditTrainer:
def __init__(self):
"""
An object to facilitate multi-armed bandit training, comparison, and
evaluation.
"""
self.logs = {}
def compare(
self,
policies,
bandit,
n_trials,
n_duplicates,
plot=True,
seed=None,
smooth_weight=0.999,
out_dir=None,
):
"""
Compare the performance of multiple policies on the same bandit
environment, generating a plot for each.
Parameters
----------
policies : list of :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instances
The multi-armed bandit policies to compare.
bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
The environment to train the policies on.
n_trials : int
The number of trials per run.
n_duplicates: int
The number of times to evaluate each policy on the bandit
environment. Larger values permit a better estimate of the
variance in payoff / cumulative regret for each policy.
plot : bool
Whether to generate a plot of the policy's average reward and
regret across the episodes. Default is True.
seed : int
The seed for the random number generator. Default is None.
smooth_weight : float in [0, 1]
The smoothing weight. Values closer to 0 result in less smoothing,
values closer to 1 produce more aggressive smoothing. Default is
0.999.
out_dir : str or None
Plots will be saved to this directory if `plot` is True. If
`out_dir` is None, plots will not be saved. Default is None.
""" # noqa: E501
self.init_logs(policies)
all_axes = [None] * len(policies)
if plot and _PLOTTING:
fig, all_axes = plt.subplots(len(policies), 2, sharex=True)
fig.set_size_inches(10.5, len(policies) * 5.25)
for policy, axes in zip(policies, all_axes):
if seed:
np.random.seed(seed)
bandit.reset()
policy.reset()
self.train(
policy,
bandit,
n_trials,
n_duplicates,
axes=axes,
plot=plot,
verbose=False,
out_dir=out_dir,
smooth_weight=smooth_weight,
)
# enforce the same y-ranges across plots for straightforward comparison
a1_r, a2_r = zip(*[(a1.get_ylim(), a2.get_ylim()) for (a1, a2) in all_axes])
a1_min = min(a1_r, key=lambda x: x[0])[0]
a1_max = max(a1_r, key=lambda x: x[1])[1]
a2_min = min(a2_r, key=lambda x: x[0])[0]
a2_max = max(a2_r, key=lambda x: x[1])[1]
for (a1, a2) in all_axes:
a1.set_ylim(a1_min, a1_max)
a2.set_ylim(a2_min, a2_max)
if plot and _PLOTTING:
if out_dir is not None:
plt.savefig(op.join(out_dir, "bandit_comparison.png"), dpi=300)
plt.show()
def train(
self,
policy,
bandit,
n_trials,
n_duplicates,
plot=True,
axes=None,
verbose=True,
print_every=100,
smooth_weight=0.999,
out_dir=None,
):
"""
Train a MAB policies on a multi-armed bandit problem, logging training
statistics along the way.
Parameters
----------
policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
The multi-armed bandit policy to train.
bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
The environment to run the policy on.
n_trials : int
The number of trials per run.
n_duplicates: int
The number of runs to evaluate
plot : bool
Whether to generate a plot of the policy's average reward and
regret across the episodes. Default is True.
axes : list of :py:class:`Axis <matplotlib.axes.Axis>` instances or None
If not None and ``plot = True``, these are the axes that will be
used to plot the cumulative reward and regret, respectively.
Default is None.
verbose : boolean
Whether to print run statistics during training. Default is True.
print_every : int
The number of episodes to run before printing loss values to
stdout. This is ignored if ``verbose`` is false. Default is 100.
smooth_weight : float in [0, 1]
The smoothing weight. Values closer to 0 result in less smoothing,
values closer to 1 produce more aggressive smoothing. Default is
0.999.
out_dir : str or None
Plots will be saved to this directory if `plot` is True. If
`out_dir` is None, plots will not be saved. Default is None.
Returns
-------
policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
The policy trained during the last (i.e. most recent) duplicate
run.
""" # noqa: E501
if not str(policy) in self.logs:
self.init_logs(policy)
p = str(policy)
D, L = n_duplicates, self.logs
for d in range(D):
if verbose:
print("\nDUPLICATE {}/{}\n".format(d + 1, D))
bandit.reset()
policy.reset()
avg_oracle_reward, cregret = 0, 0
for trial_id in range(n_trials):
rwd, arm, orwd, oarm = self._train_step(bandit, policy)
loss = mse(bandit, policy)
regret = orwd - rwd
avg_oracle_reward += orwd
cregret += regret
L[p]["mse"][trial_id + 1].append(loss)
L[p]["reward"][trial_id + 1].append(rwd)
L[p]["regret"][trial_id + 1].append(regret)
L[p]["cregret"][trial_id + 1].append(cregret)
L[p]["optimal_arm"][trial_id + 1].append(oarm)
L[p]["selected_arm"][trial_id + 1].append(arm)
L[p]["optimal_reward"][trial_id + 1].append(orwd)
if (trial_id + 1) % print_every == 0 and verbose:
fstr = "Trial {}/{}, {}/{}, Regret: {:.4f}"
print(fstr.format(trial_id + 1, n_trials, d + 1, D, regret))
avg_oracle_reward /= n_trials
if verbose:
self._print_run_summary(bandit, policy, regret)
if plot and _PLOTTING:
self._plot_reward(avg_oracle_reward, policy, smooth_weight, axes, out_dir)
return policy
def _train_step(self, bandit, policy):
P, B = policy, bandit
C = B.get_context() if hasattr(B, "get_context") else None
rwd, arm = P.act(B, C)
oracle_rwd, oracle_arm = B.oracle_payoff(C)
return rwd, arm, oracle_rwd, oracle_arm
def init_logs(self, policies):
"""
Initialize the episode logs.
Notes
-----
Training logs are represented as a nested set of dictionaries with the
following structure:
log[model_id][metric][trial_number][duplicate_number]
For example, ``logs['model1']['regret'][3][1]`` holds the regret value
accrued on the 3rd trial of the 2nd duplicate run for model1.
Available fields are 'regret', 'cregret' (cumulative regret), 'reward',
'mse' (mean-squared error between estimated arm EVs and the true EVs),
'optimal_arm', 'selected_arm', and 'optimal_reward'.
"""
if not isinstance(policies, list):
policies = [policies]
self.logs = {
str(p): {
"mse": defaultdict(lambda: []),
"regret": defaultdict(lambda: []),
"reward": defaultdict(lambda: []),
"cregret": defaultdict(lambda: []),
"optimal_arm": defaultdict(lambda: []),
"selected_arm": defaultdict(lambda: []),
"optimal_reward": defaultdict(lambda: []),
}
for p in policies
}
def _print_run_summary(self, bandit, policy, regret):
if not hasattr(policy, "ev_estimates") or len(policy.ev_estimates) == 0:
return None
evs, se = bandit.arm_evs, []
fstr = "Arm {}: {:.4f} v. {:.4f}"
ests = sorted(policy.ev_estimates.items(), key=lambda x: x[0])
print("\n\nEstimated vs. Real EV\n" + "-" * 21)
for ix, (est, ev) in enumerate(zip(ests, evs)):
print(fstr.format(ix + 1, est[1], ev))
se.append((est[1] - ev) ** 2)
fstr = "\nFinal MSE: {:.4f}\nFinal Regret: {:.4f}\n\n"
print(fstr.format(np.mean(se), regret))
def _plot_reward(self, optimal_rwd, policy, smooth_weight, axes=None, out_dir=None):
    """
    Plot the smoothed per-trial reward and cumulative regret for `policy`.

    Parameters
    ----------
    optimal_rwd : float
        Average oracle reward, drawn as a dashed reference line on the
        reward axis.
    policy : policy instance
        The policy whose logs (``self.logs[str(policy)]``) are plotted.
    smooth_weight : float in [0, 1]
        Exponential smoothing weight, forwarded to `_smoothed_metrics`.
    axes : list of two matplotlib axes or None
        If None, a fresh 1x2 figure is created (and shown / optionally
        saved); otherwise the supplied axes are drawn on in place.
    out_dir : str or None
        If not None, the standalone figure is saved as ``<policy id>.png``.

    Returns
    -------
    ax1, ax2
        The reward axis and the cumulative-regret axis.
    """
    L = self.logs[str(policy)]
    smds = self._smoothed_metrics(policy, optimal_rwd, smooth_weight)

    if axes is None:
        fig, [ax1, ax2] = plt.subplots(1, 2)
    else:
        assert len(axes) == 2
        ax1, ax2 = axes

    # trial numbers are 1-indexed in the logs
    e_ids = range(1, len(L["reward"]) + 1)
    plot_params = [[ax1, ax2], ["reward", "cregret"], ["b", "r"], [optimal_rwd, 0]]

    for (ax, m, c, opt) in zip(*plot_params):
        avg, std = "sm_{}_avg sm_{}_std".format(m, m).split()
        ax.plot(e_ids, smds[avg], color=c)
        # dashed reference: oracle reward on ax1, zero regret on ax2
        ax.axhline(opt, 0, 1, color=c, ls="--")
        # shade +/- one std (across duplicate runs) around the smoothed mean
        ax.fill_between(
            e_ids,
            smds[avg] + smds[std],
            smds[avg] - smds[std],
            color=c,
            alpha=0.25,
        )
        ax.set_xlabel("Trial")
        m = "Cumulative Regret" if m == "cregret" else m
        ax.set_ylabel("Smoothed Avg. {}".format(m.title()))

        if axes is None:
            # square-ish aspect ratio for the standalone figure
            ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))

    if axes is not None:
        ax.set_title(str(policy))
    if axes is None:
        fig.suptitle(str(policy))
        fig.tight_layout()

    if out_dir is not None:
        # NOTE(review): assumes ``policy.hyperparameters`` is a subscriptable
        # attribute (a property) exposing an "id" key
        bid = policy.hyperparameters["id"]
        plt.savefig(op.join(out_dir, f"{bid}.png"), dpi=300)
    plt.show()
    return ax1, ax2
def _smoothed_metrics(self, policy, optimal_rwd, smooth_weight):
    """
    Exponentially smooth each logged metric across trials, tracking the
    mean and standard deviation of the smoothed values across duplicate
    runs.

    Returns
    -------
    smds : dict
        Maps ``"sm_<metric>_avg"`` and ``"sm_<metric>_std"`` to arrays of
        length ``n_trials``.
    """
    L = self.logs[str(policy)]

    # pre-allocate smoothed data structure
    smds = {}
    for m in L.keys():
        if m == "selections":
            continue

        smds["sm_{}_avg".format(m)] = np.zeros(len(L["reward"]))
        smds["sm_{}_avg".format(m)][0] = np.mean(L[m][1])

        smds["sm_{}_std".format(m)] = np.zeros(len(L["reward"]))
        smds["sm_{}_std".format(m)][0] = np.std(L[m][1])

    # trial numbers are 1-indexed in the logs; seed the smoother with trial 1
    smoothed = {m: L[m][1] for m in L.keys()}
    for e_id in range(2, len(L["reward"]) + 1):
        for m in L.keys():
            if m == "selections":
                continue
            prev, cur = smoothed[m], L[m][e_id]
            # each entry corresponds to one duplicate run, smoothed independently
            smoothed[m] = [smooth(p, c, smooth_weight) for p, c in zip(prev, cur)]
            smds["sm_{}_avg".format(m)][e_id - 1] = np.mean(smoothed[m])
            smds["sm_{}_std".format(m)][e_id - 1] = np.std(smoothed[m])
    return smds
class EpsilonGreedy(BanditPolicyBase):
    def __init__(self, epsilon=0.05, ev_prior=0.5):
        r"""
        An epsilon-greedy policy for multi-armed bandit problems.

        Notes
        -----
        Epsilon-greedy policies greedily select the arm with the highest
        expected payoff with probability :math:`1-\epsilon`, and selects an arm
        uniformly at random with probability :math:`\epsilon`:

        .. math::

            P(a) = \left\{
                 \begin{array}{lr}
                   \epsilon / N + (1 - \epsilon) &\text{if }
                     a = \arg \max_{a' \in \mathcal{A}}
                        \mathbb{E}_{q_{\hat{\theta}}}[r \mid a']\\
                   \epsilon / N &\text{otherwise}
                 \end{array}
               \right.

        where :math:`N = |\mathcal{A}|` is the number of arms,
        :math:`q_{\hat{\theta}}` is the estimate of the arm payoff
        distribution under current model parameters :math:`\hat{\theta}`, and
        :math:`\mathbb{E}_{q_{\hat{\theta}}}[r \mid a']` is the expected
        reward under :math:`q_{\hat{\theta}}` of receiving reward `r` after
        taking action :math:`a'`.

        Parameters
        ----------
        epsilon : float in [0, 1]
            The probability of taking a random action. Default is 0.05.
        ev_prior : float
            The starting expected payoff for each arm before any data has been
            observed. Default is 0.5.
        """
        super().__init__()
        self.epsilon = epsilon
        self.ev_prior = ev_prior
        # number of pulls per arm, keyed by arm id; unseen arms default to 0
        self.pull_counts = defaultdict(lambda: 0)

    # `parameters` and `hyperparameters` are properties: the trainer accesses
    # them as subscriptable attributes (e.g. ``policy.hyperparameters["id"]``),
    # which fails if they are plain methods.
    @property
    def parameters(self):
        """A dictionary containing the current policy parameters"""
        return {"ev_estimates": self.ev_estimates}

    @property
    def hyperparameters(self):
        """A dictionary containing the policy hyperparameters"""
        return {
            "id": "EpsilonGreedy",
            "epsilon": self.epsilon,
            "ev_prior": self.ev_prior,
        }

    def _initialize_params(self, bandit):
        """
        Initialize any policy-specific parameters that depend on information
        from the bandit environment.
        """
        # every arm starts at the same prior EV
        self.ev_estimates = {i: self.ev_prior for i in range(bandit.n_arms)}
        self.is_initialized = True

    def _select_arm(self, bandit, context=None):
        """Select an arm: uniformly at random w.p. epsilon, greedily otherwise."""
        if np.random.rand() < self.epsilon:
            arm_id = np.random.choice(bandit.n_arms)
        else:
            ests = self.ev_estimates
            # greedy arm = key of the largest current EV estimate
            (arm_id, _) = max(ests.items(), key=lambda x: x[1])
        return arm_id

    def _update_params(self, arm_id, reward, context=None):
        """Update the pulled arm's EV estimate via an incremental running mean."""
        E, C = self.ev_estimates, self.pull_counts
        C[arm_id] += 1
        E[arm_id] += (reward - E[arm_id]) / (C[arm_id])

    def _reset_params(self):
        """
        Reset any model-specific parameters. This gets called within the
        public `self.reset()` method.
        """
        self.ev_estimates = {}
        self.pull_counts = defaultdict(lambda: 0)
The provided code snippet includes necessary dependencies for implementing the `plot_epsilon_greedy_multinomial_payoff` function. Write a Python function `def plot_epsilon_greedy_multinomial_payoff()` to solve the following problem:
Evaluate an epsilon-greedy policy on a random multinomial bandit problem
Here is the function:
def plot_epsilon_greedy_multinomial_payoff():
    """
    Evaluate an epsilon-greedy policy on a random multinomial bandit
    problem
    """
    np.random.seed(12345)

    # draw the bandit's shape first so the RNG stream matches a fixed seed
    n_arms = np.random.randint(2, 30)
    n_payoffs_per_arm = np.random.randint(2, 10)

    reward_range = [0, 1]
    trials_per_episode, n_duplicates, n_episodes = 1, 5, 5000

    bandit = random_multinomial_mab(n_arms, n_payoffs_per_arm, reward_range)
    policy = EpsilonGreedy(epsilon=0.05, ev_prior=reward_range[1] / 2)
    # NOTE(review): positionally this passes (n_trials=1, n_duplicates=5000,
    # plot=5) into BanditTrainer.train -- confirm the argument order is intended
    policy = BanditTrainer().train(
        policy, bandit, trials_per_episode, n_episodes, n_duplicates
    )
18,162 |
from collections import namedtuple
import numpy as np
from numpy_ml.bandits import (
MultinomialBandit,
BernoulliBandit,
ShortestPathBandit,
ContextualLinearBandit,
)
from numpy_ml.bandits.trainer import BanditTrainer
from numpy_ml.bandits.policies import (
EpsilonGreedy,
UCB1,
ThompsonSamplingBetaBinomial,
LinUCB,
)
from numpy_ml.utils.graphs import random_DAG, DiGraph, Edge
def random_multinomial_mab(n_arms=10, n_choices_per_arm=5, reward_range=(0, 1)):
    """Generate a random multinomial multi-armed bandit environment.

    Parameters
    ----------
    n_arms : int
        The number of arms in the bandit. Default is 10.
    n_choices_per_arm : int
        The number of distinct payoff values per arm. Default is 5.
    reward_range : sequence of two floats
        The ``(lo, hi)`` bounds for the uniformly sampled payoffs. Default
        is ``(0, 1)``. (A tuple default avoids the mutable-default-argument
        pitfall.)

    Returns
    -------
    MultinomialBandit
        A bandit whose arm `i` yields ``payoffs[i][j]`` with probability
        ``payoff_probs[i][j]``.
    """
    payoffs = []
    payoff_probs = []
    lo, hi = reward_range
    for _ in range(n_arms):
        p = np.random.uniform(size=n_choices_per_arm)
        p = p / p.sum()  # normalize into a valid per-arm distribution
        r = np.random.uniform(low=lo, high=hi, size=n_choices_per_arm)
        payoffs.append(list(r))
        payoff_probs.append(list(p))
    return MultinomialBandit(payoffs, payoff_probs)
class BanditTrainer:
    def __init__(self):
        """
        An object to facilitate multi-armed bandit training, comparison, and
        evaluation.
        """
        # (re)built by `init_logs`; see its docstring for the nested layout
        self.logs = {}

    def compare(
        self,
        policies,
        bandit,
        n_trials,
        n_duplicates,
        plot=True,
        seed=None,
        smooth_weight=0.999,
        out_dir=None,
    ):
        """
        Compare the performance of multiple policies on the same bandit
        environment, generating a plot for each.

        Parameters
        ----------
        policies : list of :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instances
            The multi-armed bandit policies to compare.
        bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
            The environment to train the policies on.
        n_trials : int
            The number of trials per run.
        n_duplicates: int
            The number of times to evaluate each policy on the bandit
            environment. Larger values permit a better estimate of the
            variance in payoff / cumulative regret for each policy.
        plot : bool
            Whether to generate a plot of the policy's average reward and
            regret across the episodes. Default is True.
        seed : int
            The seed for the random number generator. Default is None.
        smooth_weight : float in [0, 1]
            The smoothing weight. Values closer to 0 result in less smoothing,
            values closer to 1 produce more aggressive smoothing. Default is
            0.999.
        out_dir : str or None
            Plots will be saved to this directory if `plot` is True. If
            `out_dir` is None, plots will not be saved. Default is None.
        """  # noqa: E501
        self.init_logs(policies)

        all_axes = [None] * len(policies)
        if plot and _PLOTTING:
            # one row of (reward, cumulative-regret) axes per policy
            fig, all_axes = plt.subplots(len(policies), 2, sharex=True)
            fig.set_size_inches(10.5, len(policies) * 5.25)

        for policy, axes in zip(policies, all_axes):
            if seed:
                np.random.seed(seed)

            # each policy starts from a freshly reset environment/policy state
            bandit.reset()
            policy.reset()

            self.train(
                policy,
                bandit,
                n_trials,
                n_duplicates,
                axes=axes,
                plot=plot,
                verbose=False,
                out_dir=out_dir,
                smooth_weight=smooth_weight,
            )

        # enforce the same y-ranges across plots for straightforward comparison
        # NOTE(review): this unpacks real axes; with plot=False the entries are
        # None -- confirm `compare` is only called with plotting enabled
        a1_r, a2_r = zip(*[(a1.get_ylim(), a2.get_ylim()) for (a1, a2) in all_axes])
        a1_min = min(a1_r, key=lambda x: x[0])[0]
        a1_max = max(a1_r, key=lambda x: x[1])[1]

        a2_min = min(a2_r, key=lambda x: x[0])[0]
        a2_max = max(a2_r, key=lambda x: x[1])[1]

        for (a1, a2) in all_axes:
            a1.set_ylim(a1_min, a1_max)
            a2.set_ylim(a2_min, a2_max)

        if plot and _PLOTTING:
            if out_dir is not None:
                plt.savefig(op.join(out_dir, "bandit_comparison.png"), dpi=300)
            plt.show()

    def train(
        self,
        policy,
        bandit,
        n_trials,
        n_duplicates,
        plot=True,
        axes=None,
        verbose=True,
        print_every=100,
        smooth_weight=0.999,
        out_dir=None,
    ):
        """
        Train a MAB policies on a multi-armed bandit problem, logging training
        statistics along the way.

        Parameters
        ----------
        policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
            The multi-armed bandit policy to train.
        bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
            The environment to run the policy on.
        n_trials : int
            The number of trials per run.
        n_duplicates: int
            The number of runs to evaluate
        plot : bool
            Whether to generate a plot of the policy's average reward and
            regret across the episodes. Default is True.
        axes : list of :py:class:`Axis <matplotlib.axes.Axis>` instances or None
            If not None and ``plot = True``, these are the axes that will be
            used to plot the cumulative reward and regret, respectively.
            Default is None.
        verbose : boolean
            Whether to print run statistics during training. Default is True.
        print_every : int
            The number of episodes to run before printing loss values to
            stdout. This is ignored if ``verbose`` is false. Default is 100.
        smooth_weight : float in [0, 1]
            The smoothing weight. Values closer to 0 result in less smoothing,
            values closer to 1 produce more aggressive smoothing. Default is
            0.999.
        out_dir : str or None
            Plots will be saved to this directory if `plot` is True. If
            `out_dir` is None, plots will not be saved. Default is None.

        Returns
        -------
        policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
            The policy trained during the last (i.e. most recent) duplicate
            run.
        """  # noqa: E501
        if not str(policy) in self.logs:
            self.init_logs(policy)

        p = str(policy)
        D, L = n_duplicates, self.logs

        for d in range(D):
            if verbose:
                print("\nDUPLICATE {}/{}\n".format(d + 1, D))

            # each duplicate is an independent run from a fresh state
            bandit.reset()
            policy.reset()

            avg_oracle_reward, cregret = 0, 0
            for trial_id in range(n_trials):
                rwd, arm, orwd, oarm = self._train_step(bandit, policy)
                # mse between the policy's EV estimates and the true arm EVs
                loss = mse(bandit, policy)
                regret = orwd - rwd

                avg_oracle_reward += orwd
                cregret += regret

                # trial numbers are logged 1-indexed
                L[p]["mse"][trial_id + 1].append(loss)
                L[p]["reward"][trial_id + 1].append(rwd)
                L[p]["regret"][trial_id + 1].append(regret)
                L[p]["cregret"][trial_id + 1].append(cregret)
                L[p]["optimal_arm"][trial_id + 1].append(oarm)
                L[p]["selected_arm"][trial_id + 1].append(arm)
                L[p]["optimal_reward"][trial_id + 1].append(orwd)

                if (trial_id + 1) % print_every == 0 and verbose:
                    fstr = "Trial {}/{}, {}/{}, Regret: {:.4f}"
                    print(fstr.format(trial_id + 1, n_trials, d + 1, D, regret))

            avg_oracle_reward /= n_trials

            if verbose:
                self._print_run_summary(bandit, policy, regret)

        if plot and _PLOTTING:
            self._plot_reward(avg_oracle_reward, policy, smooth_weight, axes, out_dir)
        return policy

    def _train_step(self, bandit, policy):
        """Run one interaction: pull an arm via `policy` and query the oracle."""
        P, B = policy, bandit
        # contextual bandits expose ``get_context``; plain bandits get None
        C = B.get_context() if hasattr(B, "get_context") else None
        rwd, arm = P.act(B, C)
        oracle_rwd, oracle_arm = B.oracle_payoff(C)
        return rwd, arm, oracle_rwd, oracle_arm

    def init_logs(self, policies):
        """
        Initialize the episode logs.

        Notes
        -----
        Training logs are represented as a nested set of dictionaries with the
        following structure:

            log[model_id][metric][trial_number][duplicate_number]

        For example, ``logs['model1']['regret'][3][1]`` holds the regret value
        accrued on the 3rd trial of the 2nd duplicate run for model1.

        Available fields are 'regret', 'cregret' (cumulative regret), 'reward',
        'mse' (mean-squared error between estimated arm EVs and the true EVs),
        'optimal_arm', 'selected_arm', and 'optimal_reward'.
        """
        if not isinstance(policies, list):
            policies = [policies]

        self.logs = {
            str(p): {
                "mse": defaultdict(lambda: []),
                "regret": defaultdict(lambda: []),
                "reward": defaultdict(lambda: []),
                "cregret": defaultdict(lambda: []),
                "optimal_arm": defaultdict(lambda: []),
                "selected_arm": defaultdict(lambda: []),
                "optimal_reward": defaultdict(lambda: []),
            }
            for p in policies
        }

    def _print_run_summary(self, bandit, policy, regret):
        """Print estimated vs. true arm EVs plus the final MSE and regret."""
        # nothing to report for policies without EV estimates
        if not hasattr(policy, "ev_estimates") or len(policy.ev_estimates) == 0:
            return None

        evs, se = bandit.arm_evs, []
        fstr = "Arm {}: {:.4f} v. {:.4f}"
        ests = sorted(policy.ev_estimates.items(), key=lambda x: x[0])
        print("\n\nEstimated vs. Real EV\n" + "-" * 21)
        for ix, (est, ev) in enumerate(zip(ests, evs)):
            print(fstr.format(ix + 1, est[1], ev))
            se.append((est[1] - ev) ** 2)
        fstr = "\nFinal MSE: {:.4f}\nFinal Regret: {:.4f}\n\n"
        print(fstr.format(np.mean(se), regret))

    def _plot_reward(self, optimal_rwd, policy, smooth_weight, axes=None, out_dir=None):
        """Plot the smoothed per-trial reward and cumulative regret for `policy`."""
        L = self.logs[str(policy)]
        smds = self._smoothed_metrics(policy, optimal_rwd, smooth_weight)

        if axes is None:
            fig, [ax1, ax2] = plt.subplots(1, 2)
        else:
            assert len(axes) == 2
            ax1, ax2 = axes

        # trial numbers are 1-indexed in the logs
        e_ids = range(1, len(L["reward"]) + 1)
        plot_params = [[ax1, ax2], ["reward", "cregret"], ["b", "r"], [optimal_rwd, 0]]

        for (ax, m, c, opt) in zip(*plot_params):
            avg, std = "sm_{}_avg sm_{}_std".format(m, m).split()
            ax.plot(e_ids, smds[avg], color=c)
            # dashed reference: oracle reward on ax1, zero regret on ax2
            ax.axhline(opt, 0, 1, color=c, ls="--")
            # shade +/- one std (across duplicate runs) around the smoothed mean
            ax.fill_between(
                e_ids,
                smds[avg] + smds[std],
                smds[avg] - smds[std],
                color=c,
                alpha=0.25,
            )
            ax.set_xlabel("Trial")
            m = "Cumulative Regret" if m == "cregret" else m
            ax.set_ylabel("Smoothed Avg. {}".format(m.title()))

            if axes is None:
                ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))

        if axes is not None:
            ax.set_title(str(policy))
        if axes is None:
            fig.suptitle(str(policy))
            fig.tight_layout()

        if out_dir is not None:
            # assumes ``policy.hyperparameters`` is a subscriptable attribute
            bid = policy.hyperparameters["id"]
            plt.savefig(op.join(out_dir, f"{bid}.png"), dpi=300)
        plt.show()
        return ax1, ax2

    def _smoothed_metrics(self, policy, optimal_rwd, smooth_weight):
        """Exponentially smooth each logged metric, returning per-trial mean/std."""
        L = self.logs[str(policy)]

        # pre-allocate smoothed data structure
        smds = {}
        for m in L.keys():
            if m == "selections":
                continue

            smds["sm_{}_avg".format(m)] = np.zeros(len(L["reward"]))
            smds["sm_{}_avg".format(m)][0] = np.mean(L[m][1])

            smds["sm_{}_std".format(m)] = np.zeros(len(L["reward"]))
            smds["sm_{}_std".format(m)][0] = np.std(L[m][1])

        # seed the smoother with the (1-indexed) first trial's values
        smoothed = {m: L[m][1] for m in L.keys()}
        for e_id in range(2, len(L["reward"]) + 1):
            for m in L.keys():
                if m == "selections":
                    continue
                prev, cur = smoothed[m], L[m][e_id]
                # each entry is one duplicate run, smoothed independently
                smoothed[m] = [smooth(p, c, smooth_weight) for p, c in zip(prev, cur)]
                smds["sm_{}_avg".format(m)][e_id - 1] = np.mean(smoothed[m])
                smds["sm_{}_std".format(m)][e_id - 1] = np.std(smoothed[m])
        return smds
class UCB1(BanditPolicyBase):
    def __init__(self, C=1, ev_prior=0.5):
        r"""
        A UCB1 policy for multi-armed bandit problems.

        Notes
        -----
        The UCB1 algorithm [*]_ guarantees the cumulative regret is bounded by log
        `t`, where `t` is the current timestep. To make this guarantee UCB1
        assumes all arm payoffs are between 0 and 1.

        Under UCB1, the upper confidence bound on the expected value for
        pulling arm `a` at timestep `t` is:

        .. math::

            \text{UCB}(a, t) = \text{EV}_t(a) + C \sqrt{\frac{2 \log t}{N_t(a)}}

        where :math:`\text{EV}_t(a)` is the average of the rewards received so
        far from pulling arm `a`, `C` is a free parameter controlling the
        "optimism" of the confidence upper bound for :math:`\text{UCB}(a, t)`
        (for logarithmic regret bounds, `C` must equal 1), and :math:`N_t(a)`
        is the number of times arm `a` has been pulled during the previous `t -
        1` timesteps.

        References
        ----------
        .. [*] Auer, P., Cesa-Bianchi, N., & Fischer, P. (2002). Finite-time
           analysis of the multiarmed bandit problem. *Machine Learning,
           47(2)*.

        Parameters
        ----------
        C : float in (0, +infinity)
            A confidence/optimism parameter affecting the degree of
            exploration, where larger values encourage greater exploration. The
            UCB1 algorithm assumes `C=1`. Default is 1.
        ev_prior : float
            The starting expected value for each arm before any data has been
            observed. Default is 0.5.
        """
        self.C = C
        self.ev_prior = ev_prior
        super().__init__()

    # `parameters` and `hyperparameters` are properties: the trainer accesses
    # them as subscriptable attributes (e.g. ``policy.hyperparameters["id"]``),
    # which fails if they are plain methods.
    @property
    def parameters(self):
        """A dictionary containing the current policy parameters"""
        return {"ev_estimates": self.ev_estimates}

    @property
    def hyperparameters(self):
        """A dictionary containing the policy hyperparameters"""
        return {
            "C": self.C,
            "id": "UCB1",
            "ev_prior": self.ev_prior,
        }

    def _initialize_params(self, bandit):
        """
        Initialize any policy-specific parameters that depend on information
        from the bandit environment.
        """
        # every arm starts at the same prior EV
        self.ev_estimates = {i: self.ev_prior for i in range(bandit.n_arms)}
        self.is_initialized = True

    def _select_arm(self, bandit, context=None):
        """Select the arm with the largest upper confidence bound."""
        # add eps to avoid divide-by-zero errors on the first pull of each arm
        eps = np.finfo(float).eps
        # assumes the base class tracks the current timestep in `self.step`
        N, T = bandit.n_arms, self.step + 1
        E, C = self.ev_estimates, self.pull_counts
        scores = [E[a] + self.C * np.sqrt(np.log(T) / (C[a] + eps)) for a in range(N)]
        return np.argmax(scores)

    def _update_params(self, arm_id, reward, context=None):
        """Update the pulled arm's EV estimate via an incremental running mean."""
        E, C = self.ev_estimates, self.pull_counts
        C[arm_id] += 1
        E[arm_id] += (reward - E[arm_id]) / (C[arm_id])

    def _reset_params(self):
        """
        Reset any model-specific parameters. This gets called within the
        public :meth:`reset` method.
        """
        self.ev_estimates = {}
        self.pull_counts = defaultdict(lambda: 0)
The provided code snippet includes necessary dependencies for implementing the `plot_ucb1_multinomial_payoff` function. Write a Python function `def plot_ucb1_multinomial_payoff()` to solve the following problem:
Evaluate the UCB1 policy on a multinomial bandit environment
Here is the function:
def plot_ucb1_multinomial_payoff():
    """Evaluate the UCB1 policy on a multinomial bandit environment"""
    np.random.seed(12345)

    # draw the bandit's shape first so the RNG stream matches a fixed seed
    n_arms = np.random.randint(2, 30)
    n_payoffs_per_arm = np.random.randint(2, 10)

    confidence = 1
    reward_range = [0, 1]
    trials_per_episode, n_duplicates, n_episodes = 1, 5, 5000

    bandit = random_multinomial_mab(n_arms, n_payoffs_per_arm, reward_range)
    policy = UCB1(C=confidence, ev_prior=reward_range[1] / 2)
    # NOTE(review): positionally this passes (n_trials=1, n_duplicates=5000,
    # plot=5) into BanditTrainer.train -- confirm the argument order is intended
    policy = BanditTrainer().train(
        policy, bandit, trials_per_episode, n_episodes, n_duplicates
    )
18,163 |
from collections import namedtuple
import numpy as np
from numpy_ml.bandits import (
MultinomialBandit,
BernoulliBandit,
ShortestPathBandit,
ContextualLinearBandit,
)
from numpy_ml.bandits.trainer import BanditTrainer
from numpy_ml.bandits.policies import (
EpsilonGreedy,
UCB1,
ThompsonSamplingBetaBinomial,
LinUCB,
)
from numpy_ml.utils.graphs import random_DAG, DiGraph, Edge
def random_bernoulli_mab(n_arms=10):
    """Generate a random Bernoulli multi-armed bandit environment.

    Parameters
    ----------
    n_arms : int
        The number of arms in the bandit. Default is 10.

    Returns
    -------
    BernoulliBandit
        A bandit whose arm `i` pays 1 with probability ``payoff_probs[i]``.
    """
    p = np.random.uniform(size=n_arms)
    # NOTE(review): normalizing across arms makes the per-arm success
    # probabilities sum to 1 (each well below 1 for large n_arms) -- confirm
    # this cross-arm normalization is intended rather than per-arm clipping
    payoff_probs = p / p.sum()
    return BernoulliBandit(payoff_probs)
class BanditTrainer:
    def __init__(self):
        """
        An object to facilitate multi-armed bandit training, comparison, and
        evaluation.
        """
        # (re)built by `init_logs`; see its docstring for the nested layout
        self.logs = {}

    def compare(
        self,
        policies,
        bandit,
        n_trials,
        n_duplicates,
        plot=True,
        seed=None,
        smooth_weight=0.999,
        out_dir=None,
    ):
        """
        Compare the performance of multiple policies on the same bandit
        environment, generating a plot for each.

        Parameters
        ----------
        policies : list of :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instances
            The multi-armed bandit policies to compare.
        bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
            The environment to train the policies on.
        n_trials : int
            The number of trials per run.
        n_duplicates: int
            The number of times to evaluate each policy on the bandit
            environment. Larger values permit a better estimate of the
            variance in payoff / cumulative regret for each policy.
        plot : bool
            Whether to generate a plot of the policy's average reward and
            regret across the episodes. Default is True.
        seed : int
            The seed for the random number generator. Default is None.
        smooth_weight : float in [0, 1]
            The smoothing weight. Values closer to 0 result in less smoothing,
            values closer to 1 produce more aggressive smoothing. Default is
            0.999.
        out_dir : str or None
            Plots will be saved to this directory if `plot` is True. If
            `out_dir` is None, plots will not be saved. Default is None.
        """  # noqa: E501
        self.init_logs(policies)

        all_axes = [None] * len(policies)
        if plot and _PLOTTING:
            # one row of (reward, cumulative-regret) axes per policy
            fig, all_axes = plt.subplots(len(policies), 2, sharex=True)
            fig.set_size_inches(10.5, len(policies) * 5.25)

        for policy, axes in zip(policies, all_axes):
            if seed:
                np.random.seed(seed)

            # each policy starts from a freshly reset environment/policy state
            bandit.reset()
            policy.reset()

            self.train(
                policy,
                bandit,
                n_trials,
                n_duplicates,
                axes=axes,
                plot=plot,
                verbose=False,
                out_dir=out_dir,
                smooth_weight=smooth_weight,
            )

        # enforce the same y-ranges across plots for straightforward comparison
        # NOTE(review): this unpacks real axes; with plot=False the entries are
        # None -- confirm `compare` is only called with plotting enabled
        a1_r, a2_r = zip(*[(a1.get_ylim(), a2.get_ylim()) for (a1, a2) in all_axes])
        a1_min = min(a1_r, key=lambda x: x[0])[0]
        a1_max = max(a1_r, key=lambda x: x[1])[1]

        a2_min = min(a2_r, key=lambda x: x[0])[0]
        a2_max = max(a2_r, key=lambda x: x[1])[1]

        for (a1, a2) in all_axes:
            a1.set_ylim(a1_min, a1_max)
            a2.set_ylim(a2_min, a2_max)

        if plot and _PLOTTING:
            if out_dir is not None:
                plt.savefig(op.join(out_dir, "bandit_comparison.png"), dpi=300)
            plt.show()

    def train(
        self,
        policy,
        bandit,
        n_trials,
        n_duplicates,
        plot=True,
        axes=None,
        verbose=True,
        print_every=100,
        smooth_weight=0.999,
        out_dir=None,
    ):
        """
        Train a MAB policies on a multi-armed bandit problem, logging training
        statistics along the way.

        Parameters
        ----------
        policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
            The multi-armed bandit policy to train.
        bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
            The environment to run the policy on.
        n_trials : int
            The number of trials per run.
        n_duplicates: int
            The number of runs to evaluate
        plot : bool
            Whether to generate a plot of the policy's average reward and
            regret across the episodes. Default is True.
        axes : list of :py:class:`Axis <matplotlib.axes.Axis>` instances or None
            If not None and ``plot = True``, these are the axes that will be
            used to plot the cumulative reward and regret, respectively.
            Default is None.
        verbose : boolean
            Whether to print run statistics during training. Default is True.
        print_every : int
            The number of episodes to run before printing loss values to
            stdout. This is ignored if ``verbose`` is false. Default is 100.
        smooth_weight : float in [0, 1]
            The smoothing weight. Values closer to 0 result in less smoothing,
            values closer to 1 produce more aggressive smoothing. Default is
            0.999.
        out_dir : str or None
            Plots will be saved to this directory if `plot` is True. If
            `out_dir` is None, plots will not be saved. Default is None.

        Returns
        -------
        policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
            The policy trained during the last (i.e. most recent) duplicate
            run.
        """  # noqa: E501
        if not str(policy) in self.logs:
            self.init_logs(policy)

        p = str(policy)
        D, L = n_duplicates, self.logs

        for d in range(D):
            if verbose:
                print("\nDUPLICATE {}/{}\n".format(d + 1, D))

            # each duplicate is an independent run from a fresh state
            bandit.reset()
            policy.reset()

            avg_oracle_reward, cregret = 0, 0
            for trial_id in range(n_trials):
                rwd, arm, orwd, oarm = self._train_step(bandit, policy)
                # mse between the policy's EV estimates and the true arm EVs
                loss = mse(bandit, policy)
                regret = orwd - rwd

                avg_oracle_reward += orwd
                cregret += regret

                # trial numbers are logged 1-indexed
                L[p]["mse"][trial_id + 1].append(loss)
                L[p]["reward"][trial_id + 1].append(rwd)
                L[p]["regret"][trial_id + 1].append(regret)
                L[p]["cregret"][trial_id + 1].append(cregret)
                L[p]["optimal_arm"][trial_id + 1].append(oarm)
                L[p]["selected_arm"][trial_id + 1].append(arm)
                L[p]["optimal_reward"][trial_id + 1].append(orwd)

                if (trial_id + 1) % print_every == 0 and verbose:
                    fstr = "Trial {}/{}, {}/{}, Regret: {:.4f}"
                    print(fstr.format(trial_id + 1, n_trials, d + 1, D, regret))

            avg_oracle_reward /= n_trials

            if verbose:
                self._print_run_summary(bandit, policy, regret)

        if plot and _PLOTTING:
            self._plot_reward(avg_oracle_reward, policy, smooth_weight, axes, out_dir)
        return policy

    def _train_step(self, bandit, policy):
        """Run one interaction: pull an arm via `policy` and query the oracle."""
        P, B = policy, bandit
        # contextual bandits expose ``get_context``; plain bandits get None
        C = B.get_context() if hasattr(B, "get_context") else None
        rwd, arm = P.act(B, C)
        oracle_rwd, oracle_arm = B.oracle_payoff(C)
        return rwd, arm, oracle_rwd, oracle_arm

    def init_logs(self, policies):
        """
        Initialize the episode logs.

        Notes
        -----
        Training logs are represented as a nested set of dictionaries with the
        following structure:

            log[model_id][metric][trial_number][duplicate_number]

        For example, ``logs['model1']['regret'][3][1]`` holds the regret value
        accrued on the 3rd trial of the 2nd duplicate run for model1.

        Available fields are 'regret', 'cregret' (cumulative regret), 'reward',
        'mse' (mean-squared error between estimated arm EVs and the true EVs),
        'optimal_arm', 'selected_arm', and 'optimal_reward'.
        """
        if not isinstance(policies, list):
            policies = [policies]

        self.logs = {
            str(p): {
                "mse": defaultdict(lambda: []),
                "regret": defaultdict(lambda: []),
                "reward": defaultdict(lambda: []),
                "cregret": defaultdict(lambda: []),
                "optimal_arm": defaultdict(lambda: []),
                "selected_arm": defaultdict(lambda: []),
                "optimal_reward": defaultdict(lambda: []),
            }
            for p in policies
        }

    def _print_run_summary(self, bandit, policy, regret):
        """Print estimated vs. true arm EVs plus the final MSE and regret."""
        # nothing to report for policies without EV estimates
        if not hasattr(policy, "ev_estimates") or len(policy.ev_estimates) == 0:
            return None

        evs, se = bandit.arm_evs, []
        fstr = "Arm {}: {:.4f} v. {:.4f}"
        ests = sorted(policy.ev_estimates.items(), key=lambda x: x[0])
        print("\n\nEstimated vs. Real EV\n" + "-" * 21)
        for ix, (est, ev) in enumerate(zip(ests, evs)):
            print(fstr.format(ix + 1, est[1], ev))
            se.append((est[1] - ev) ** 2)
        fstr = "\nFinal MSE: {:.4f}\nFinal Regret: {:.4f}\n\n"
        print(fstr.format(np.mean(se), regret))

    def _plot_reward(self, optimal_rwd, policy, smooth_weight, axes=None, out_dir=None):
        """Plot the smoothed per-trial reward and cumulative regret for `policy`."""
        L = self.logs[str(policy)]
        smds = self._smoothed_metrics(policy, optimal_rwd, smooth_weight)

        if axes is None:
            fig, [ax1, ax2] = plt.subplots(1, 2)
        else:
            assert len(axes) == 2
            ax1, ax2 = axes

        # trial numbers are 1-indexed in the logs
        e_ids = range(1, len(L["reward"]) + 1)
        plot_params = [[ax1, ax2], ["reward", "cregret"], ["b", "r"], [optimal_rwd, 0]]

        for (ax, m, c, opt) in zip(*plot_params):
            avg, std = "sm_{}_avg sm_{}_std".format(m, m).split()
            ax.plot(e_ids, smds[avg], color=c)
            # dashed reference: oracle reward on ax1, zero regret on ax2
            ax.axhline(opt, 0, 1, color=c, ls="--")
            # shade +/- one std (across duplicate runs) around the smoothed mean
            ax.fill_between(
                e_ids,
                smds[avg] + smds[std],
                smds[avg] - smds[std],
                color=c,
                alpha=0.25,
            )
            ax.set_xlabel("Trial")
            m = "Cumulative Regret" if m == "cregret" else m
            ax.set_ylabel("Smoothed Avg. {}".format(m.title()))

            if axes is None:
                ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))

        if axes is not None:
            ax.set_title(str(policy))
        if axes is None:
            fig.suptitle(str(policy))
            fig.tight_layout()

        if out_dir is not None:
            # assumes ``policy.hyperparameters`` is a subscriptable attribute
            bid = policy.hyperparameters["id"]
            plt.savefig(op.join(out_dir, f"{bid}.png"), dpi=300)
        plt.show()
        return ax1, ax2

    def _smoothed_metrics(self, policy, optimal_rwd, smooth_weight):
        """Exponentially smooth each logged metric, returning per-trial mean/std."""
        L = self.logs[str(policy)]

        # pre-allocate smoothed data structure
        smds = {}
        for m in L.keys():
            if m == "selections":
                continue

            smds["sm_{}_avg".format(m)] = np.zeros(len(L["reward"]))
            smds["sm_{}_avg".format(m)][0] = np.mean(L[m][1])

            smds["sm_{}_std".format(m)] = np.zeros(len(L["reward"]))
            smds["sm_{}_std".format(m)][0] = np.std(L[m][1])

        # seed the smoother with the (1-indexed) first trial's values
        smoothed = {m: L[m][1] for m in L.keys()}
        for e_id in range(2, len(L["reward"]) + 1):
            for m in L.keys():
                if m == "selections":
                    continue
                prev, cur = smoothed[m], L[m][e_id]
                # each entry is one duplicate run, smoothed independently
                smoothed[m] = [smooth(p, c, smooth_weight) for p, c in zip(prev, cur)]
                smds["sm_{}_avg".format(m)][e_id - 1] = np.mean(smoothed[m])
                smds["sm_{}_std".format(m)][e_id - 1] = np.std(smoothed[m])
        return smds
class ThompsonSamplingBetaBinomial(BanditPolicyBase):
    def __init__(self, alpha=1, beta=1):
        r"""
        A conjugate Thompson sampling [1]_ [2]_ policy for multi-armed bandits with
        Bernoulli likelihoods.

        Notes
        -----
        The policy assumes independent Beta priors on the Bernoulli arm payoff
        probabilities, :math:`\theta`:

        .. math::

            \theta_k \sim \text{Beta}(\alpha_k, \beta_k) \\
            r \mid \theta_k \sim \text{Bernoulli}(\theta_k)

        where :math:`k \in \{1,\ldots,K \}` indexes arms in the MAB and
        :math:`\theta_k` is the parameter of the Bernoulli likelihood for arm
        `k`. The sampler begins by selecting an arm with probability
        proportional to its payoff probability under the initial Beta prior.
        After pulling the sampled arm and receiving a reward, `r`, the sampler
        computes the posterior over the model parameters (arm payoffs) via
        Bayes' rule, and then samples a new action in proportion to its payoff
        probability under this posterior. This process (i.e., sample action
        from posterior, take action and receive reward, compute updated
        posterior) is repeated until the number of trials is exhausted.

        Note that due to the conjugacy between the Beta prior and Bernoulli
        likelihood the posterior for each arm will also be Beta-distributed and
        can computed and sampled from efficiently:

        .. math::

            \theta_k \mid r \sim \text{Beta}(\alpha_k + r, \beta_k + 1 - r)

        References
        ----------
        .. [1] Thompson, W. (1933). On the likelihood that one unknown
           probability exceeds another in view of the evidence of two samples.
           *Biometrika, 25(3/4)*, 285-294.
        .. [2] Chapelle, O., & Li, L. (2011). An empirical evaluation of
           Thompson sampling. *Advances in Neural Information Processing
           Systems, 24*, 2249-2257.

        Parameters
        ----------
        alpha : float or list of length `K`
            Parameter for the Beta prior on arm payouts. If a float, this value
            will be used in the prior for all of the `K` arms.
        beta : float or list of length `K`
            Parameter for the Beta prior on arm payouts. If a float, this value
            will be used in the prior for all of the `K` arms.
        """
        super().__init__()
        self.alphas, self.betas = [], []
        self.alpha, self.beta = alpha, beta
        self.is_initialized = False

    # `parameters` and `hyperparameters` are properties: the trainer accesses
    # them as subscriptable attributes (e.g. ``policy.hyperparameters["id"]``),
    # which fails if they are plain methods.
    @property
    def parameters(self):
        """A dictionary containing the current policy parameters"""
        return {
            "ev_estimates": self.ev_estimates,
            "alphas": self.alphas,
            "betas": self.betas,
        }

    @property
    def hyperparameters(self):
        """A dictionary containing the policy hyperparameters"""
        return {
            "id": "ThompsonSamplingBetaBinomial",
            "alpha": self.alpha,
            "beta": self.beta,
        }

    def _initialize_params(self, bandit):
        """Expand the scalar Beta prior to one (alpha, beta) pair per arm."""
        bhp = bandit.hyperparameters
        fstr = "ThompsonSamplingBetaBinomial only defined for BernoulliBandit, got: {}"
        assert bhp["id"] == "BernoulliBandit", fstr.format(bhp["id"])

        # initialize the model prior
        if is_number(self.alpha):
            self.alphas = [self.alpha] * bandit.n_arms
        if is_number(self.beta):
            self.betas = [self.beta] * bandit.n_arms
        assert len(self.alphas) == len(self.betas) == bandit.n_arms

        self.ev_estimates = {i: self._map_estimate(i, 1) for i in range(bandit.n_arms)}
        self.is_initialized = True

    def _select_arm(self, bandit, context):
        """Sample arm payoffs from the posterior and pick the best sample."""
        if not self.is_initialized:
            # BUG FIX: this previously called the nonexistent method
            # ``_initialize_prior``; the initializer is ``_initialize_params``
            self._initialize_params(bandit)

        # draw a sample from the current model posterior
        posterior_sample = np.random.beta(self.alphas, self.betas)

        # greedily select an action based on this sample
        return np.argmax(posterior_sample)

    def _update_params(self, arm_id, rwd, context):
        """
        Compute the parameters of the Beta posterior, P(payoff prob | rwd),
        for arm `arm_id`.
        """
        # Beta-Bernoulli conjugate update: alpha += r, beta += 1 - r
        self.alphas[arm_id] += rwd
        self.betas[arm_id] += 1 - rwd
        self.ev_estimates[arm_id] = self._map_estimate(arm_id, rwd)

    def _map_estimate(self, arm_id, rwd):
        """Compute the current MAP estimate for an arm's payoff probability"""
        A, B = self.alphas, self.betas
        if A[arm_id] > 1 and B[arm_id] > 1:
            # interior mode of the Beta distribution
            map_payoff_prob = (A[arm_id] - 1) / (A[arm_id] + B[arm_id] - 2)
        elif A[arm_id] < 1 and B[arm_id] < 1:
            map_payoff_prob = rwd  # 0 or 1 equally likely, make a guess
        elif A[arm_id] <= 1 and B[arm_id] > 1:
            map_payoff_prob = 0
        elif A[arm_id] > 1 and B[arm_id] <= 1:
            map_payoff_prob = 1
        else:
            map_payoff_prob = 0.5
        return map_payoff_prob

    def _reset_params(self):
        """
        Reset any model-specific parameters. This gets called within the
        public `self.reset()` method.
        """
        self.alphas, self.betas = [], []
        self.ev_estimates = {}
The provided code snippet includes necessary dependencies for implementing the `plot_thompson_sampling_beta_binomial_payoff` function. Write a Python function `def plot_thompson_sampling_beta_binomial_payoff()` to solve the following problem:
Evaluate the ThompsonSamplingBetaBinomial policy on a random Bernoulli multi-armed bandit.
Here is the function:
def plot_thompson_sampling_beta_binomial_payoff():
    """
    Evaluate the ThompsonSamplingBetaBinomial policy on a random Bernoulli
    multi-armed bandit.
    """
    np.random.seed(12345)
    n_arms = np.random.randint(2, 30)

    ep_length = 1
    n_duplicates = 5
    n_episodes = 5000

    mab = random_bernoulli_mab(n_arms)
    policy = ThompsonSamplingBetaBinomial(alpha=1, beta=1)
    # NOTE(review): BanditTrainer.train's positional parameters are
    # (policy, bandit, n_trials, n_duplicates, plot, ...) -- confirm that
    # passing (ep_length, n_episodes, n_duplicates) positionally is intended.
    policy = BanditTrainer().train(policy, mab, ep_length, n_episodes, n_duplicates)
18,164 | from collections import namedtuple
import numpy as np
from numpy_ml.bandits import (
MultinomialBandit,
BernoulliBandit,
ShortestPathBandit,
ContextualLinearBandit,
)
from numpy_ml.bandits.trainer import BanditTrainer
from numpy_ml.bandits.policies import (
EpsilonGreedy,
UCB1,
ThompsonSamplingBetaBinomial,
LinUCB,
)
from numpy_ml.utils.graphs import random_DAG, DiGraph, Edge
class BanditTrainer:
    """Train, compare, and evaluate multi-armed bandit policies."""

    def __init__(self):
        """
        An object to facilitate multi-armed bandit training, comparison, and
        evaluation.
        """
        # Nested training logs:
        #   logs[policy_id][metric][trial_number] -> list, one value per
        # duplicate run.  See `init_logs` for the available metrics.
        self.logs = {}

    def compare(
        self,
        policies,
        bandit,
        n_trials,
        n_duplicates,
        plot=True,
        seed=None,
        smooth_weight=0.999,
        out_dir=None,
    ):
        """
        Compare the performance of multiple policies on the same bandit
        environment, generating a plot for each.

        Parameters
        ----------
        policies : list of :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instances
            The multi-armed bandit policies to compare.
        bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
            The environment to train the policies on.
        n_trials : int
            The number of trials per run.
        n_duplicates: int
            The number of times to evaluate each policy on the bandit
            environment. Larger values permit a better estimate of the
            variance in payoff / cumulative regret for each policy.
        plot : bool
            Whether to generate a plot of the policy's average reward and
            regret across the episodes. Default is True.
        seed : int
            The seed for the random number generator. Default is None.
        smooth_weight : float in [0, 1]
            The smoothing weight. Values closer to 0 result in less smoothing,
            values closer to 1 produce more aggressive smoothing. Default is
            0.999.
        out_dir : str or None
            Plots will be saved to this directory if `plot` is True. If
            `out_dir` is None, plots will not be saved. Default is None.
        """  # noqa: E501
        self.init_logs(policies)

        all_axes = [None] * len(policies)
        if plot and _PLOTTING:
            # one (reward, regret) axis pair per policy, stacked vertically
            fig, all_axes = plt.subplots(len(policies), 2, sharex=True)
            fig.set_size_inches(10.5, len(policies) * 5.25)

        for policy, axes in zip(policies, all_axes):
            if seed:
                # reseed before each policy so every policy sees the same
                # stream of environment randomness
                np.random.seed(seed)

            bandit.reset()
            policy.reset()

            self.train(
                policy,
                bandit,
                n_trials,
                n_duplicates,
                axes=axes,
                plot=plot,
                verbose=False,
                out_dir=out_dir,
                smooth_weight=smooth_weight,
            )

        # enforce the same y-ranges across plots for straightforward comparison
        # NOTE(review): when `plot` is False (or matplotlib is unavailable)
        # each entry of `all_axes` is None, so the unpacking below would raise.
        # Confirm `compare` is only ever called with plotting enabled.
        a1_r, a2_r = zip(*[(a1.get_ylim(), a2.get_ylim()) for (a1, a2) in all_axes])
        a1_min = min(a1_r, key=lambda x: x[0])[0]
        a1_max = max(a1_r, key=lambda x: x[1])[1]
        a2_min = min(a2_r, key=lambda x: x[0])[0]
        a2_max = max(a2_r, key=lambda x: x[1])[1]

        for (a1, a2) in all_axes:
            a1.set_ylim(a1_min, a1_max)
            a2.set_ylim(a2_min, a2_max)

        if plot and _PLOTTING:
            if out_dir is not None:
                plt.savefig(op.join(out_dir, "bandit_comparison.png"), dpi=300)
            plt.show()

    def train(
        self,
        policy,
        bandit,
        n_trials,
        n_duplicates,
        plot=True,
        axes=None,
        verbose=True,
        print_every=100,
        smooth_weight=0.999,
        out_dir=None,
    ):
        """
        Train a MAB policies on a multi-armed bandit problem, logging training
        statistics along the way.

        Parameters
        ----------
        policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
            The multi-armed bandit policy to train.
        bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
            The environment to run the policy on.
        n_trials : int
            The number of trials per run.
        n_duplicates: int
            The number of runs to evaluate
        plot : bool
            Whether to generate a plot of the policy's average reward and
            regret across the episodes. Default is True.
        axes : list of :py:class:`Axis <matplotlib.axes.Axis>` instances or None
            If not None and ``plot = True``, these are the axes that will be
            used to plot the cumulative reward and regret, respectively.
            Default is None.
        verbose : boolean
            Whether to print run statistics during training. Default is True.
        print_every : int
            The number of episodes to run before printing loss values to
            stdout. This is ignored if ``verbose`` is false. Default is 100.
        smooth_weight : float in [0, 1]
            The smoothing weight. Values closer to 0 result in less smoothing,
            values closer to 1 produce more aggressive smoothing. Default is
            0.999.
        out_dir : str or None
            Plots will be saved to this directory if `plot` is True. If
            `out_dir` is None, plots will not be saved. Default is None.

        Returns
        -------
        policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
            The policy trained during the last (i.e. most recent) duplicate
            run.
        """  # noqa: E501
        if not str(policy) in self.logs:
            self.init_logs(policy)

        p = str(policy)
        D, L = n_duplicates, self.logs

        for d in range(D):
            if verbose:
                print("\nDUPLICATE {}/{}\n".format(d + 1, D))

            # fresh environment + policy state for every duplicate run
            bandit.reset()
            policy.reset()

            avg_oracle_reward, cregret = 0, 0
            for trial_id in range(n_trials):
                rwd, arm, orwd, oarm = self._train_step(bandit, policy)

                loss = mse(bandit, policy)
                regret = orwd - rwd

                avg_oracle_reward += orwd
                cregret += regret

                # metrics are keyed by 1-indexed trial number; each append
                # records this duplicate's value for that trial
                L[p]["mse"][trial_id + 1].append(loss)
                L[p]["reward"][trial_id + 1].append(rwd)
                L[p]["regret"][trial_id + 1].append(regret)
                L[p]["cregret"][trial_id + 1].append(cregret)
                L[p]["optimal_arm"][trial_id + 1].append(oarm)
                L[p]["selected_arm"][trial_id + 1].append(arm)
                L[p]["optimal_reward"][trial_id + 1].append(orwd)

                if (trial_id + 1) % print_every == 0 and verbose:
                    fstr = "Trial {}/{}, {}/{}, Regret: {:.4f}"
                    print(fstr.format(trial_id + 1, n_trials, d + 1, D, regret))

            avg_oracle_reward /= n_trials

            if verbose:
                # `regret` is the final trial's regret for this duplicate
                self._print_run_summary(bandit, policy, regret)

        if plot and _PLOTTING:
            self._plot_reward(avg_oracle_reward, policy, smooth_weight, axes, out_dir)

        return policy

    def _train_step(self, bandit, policy):
        """Run one pull and also query the bandit's oracle payoff."""
        P, B = policy, bandit
        # contextual bandits expose `get_context`; plain bandits get None
        C = B.get_context() if hasattr(B, "get_context") else None
        rwd, arm = P.act(B, C)
        oracle_rwd, oracle_arm = B.oracle_payoff(C)
        return rwd, arm, oracle_rwd, oracle_arm

    def init_logs(self, policies):
        """
        Initialize the episode logs.

        Notes
        -----
        Training logs are represented as a nested set of dictionaries with the
        following structure:

            log[model_id][metric][trial_number][duplicate_number]

        For example, ``logs['model1']['regret'][3][1]`` holds the regret value
        accrued on the 3rd trial of the 2nd duplicate run for model1.

        Available fields are 'regret', 'cregret' (cumulative regret), 'reward',
        'mse' (mean-squared error between estimated arm EVs and the true EVs),
        'optimal_arm', 'selected_arm', and 'optimal_reward'.
        """
        if not isinstance(policies, list):
            policies = [policies]

        # defaultdicts let metrics for a new trial number appear on demand
        self.logs = {
            str(p): {
                "mse": defaultdict(lambda: []),
                "regret": defaultdict(lambda: []),
                "reward": defaultdict(lambda: []),
                "cregret": defaultdict(lambda: []),
                "optimal_arm": defaultdict(lambda: []),
                "selected_arm": defaultdict(lambda: []),
                "optimal_reward": defaultdict(lambda: []),
            }
            for p in policies
        }

    def _print_run_summary(self, bandit, policy, regret):
        """Print estimated vs. true arm EVs plus the final MSE and regret."""
        # nothing to report for policies that don't track EV estimates
        if not hasattr(policy, "ev_estimates") or len(policy.ev_estimates) == 0:
            return None

        evs, se = bandit.arm_evs, []
        fstr = "Arm {}: {:.4f} v. {:.4f}"
        ests = sorted(policy.ev_estimates.items(), key=lambda x: x[0])
        print("\n\nEstimated vs. Real EV\n" + "-" * 21)
        for ix, (est, ev) in enumerate(zip(ests, evs)):
            print(fstr.format(ix + 1, est[1], ev))
            se.append((est[1] - ev) ** 2)
        fstr = "\nFinal MSE: {:.4f}\nFinal Regret: {:.4f}\n\n"
        print(fstr.format(np.mean(se), regret))

    def _plot_reward(self, optimal_rwd, policy, smooth_weight, axes=None, out_dir=None):
        """Plot smoothed average reward and cumulative regret for `policy`."""
        L = self.logs[str(policy)]
        smds = self._smoothed_metrics(policy, optimal_rwd, smooth_weight)

        if axes is None:
            fig, [ax1, ax2] = plt.subplots(1, 2)
        else:
            assert len(axes) == 2
            ax1, ax2 = axes

        e_ids = range(1, len(L["reward"]) + 1)
        plot_params = [[ax1, ax2], ["reward", "cregret"], ["b", "r"], [optimal_rwd, 0]]

        for (ax, m, c, opt) in zip(*plot_params):
            avg, std = "sm_{}_avg sm_{}_std".format(m, m).split()
            ax.plot(e_ids, smds[avg], color=c)
            # dashed line marks the optimum (oracle avg. reward / zero regret)
            ax.axhline(opt, 0, 1, color=c, ls="--")
            # shade +/- one (smoothed) std dev around the mean
            ax.fill_between(
                e_ids,
                smds[avg] + smds[std],
                smds[avg] - smds[std],
                color=c,
                alpha=0.25,
            )
            ax.set_xlabel("Trial")
            m = "Cumulative Regret" if m == "cregret" else m
            ax.set_ylabel("Smoothed Avg. {}".format(m.title()))

            if axes is None:
                ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))

        if axes is not None:
            ax.set_title(str(policy))
        if axes is None:
            fig.suptitle(str(policy))
            fig.tight_layout()

        if out_dir is not None:
            # NOTE(review): subscripting `policy.hyperparameters` assumes it
            # is a property/dict rather than a plain method -- confirm.
            bid = policy.hyperparameters["id"]
            plt.savefig(op.join(out_dir, f"{bid}.png"), dpi=300)
        plt.show()
        return ax1, ax2

    def _smoothed_metrics(self, policy, optimal_rwd, smooth_weight):
        """Exponentially smooth every logged metric across trials.

        NOTE(review): `optimal_rwd` is accepted but never used here, and the
        'selections' key skipped below is never created by `init_logs` --
        confirm whether both are leftovers.
        """
        L = self.logs[str(policy)]

        # pre-allocate smoothed data structure
        smds = {}
        for m in L.keys():
            if m == "selections":
                continue

            smds["sm_{}_avg".format(m)] = np.zeros(len(L["reward"]))
            smds["sm_{}_avg".format(m)][0] = np.mean(L[m][1])

            smds["sm_{}_std".format(m)] = np.zeros(len(L["reward"]))
            smds["sm_{}_std".format(m)][0] = np.std(L[m][1])

        smoothed = {m: L[m][1] for m in L.keys()}
        for e_id in range(2, len(L["reward"]) + 1):
            for m in L.keys():
                if m == "selections":
                    continue

                prev, cur = smoothed[m], L[m][e_id]
                smoothed[m] = [smooth(p, c, smooth_weight) for p, c in zip(prev, cur)]
                smds["sm_{}_avg".format(m)][e_id - 1] = np.mean(smoothed[m])
                smds["sm_{}_std".format(m)][e_id - 1] = np.std(smoothed[m])
        return smds
class LinUCB(BanditPolicyBase):
    def __init__(self, alpha=1):
        """
        A disjoint linear UCB policy [*]_ for contextual linear bandits.

        Notes
        -----
        LinUCB is only defined for :class:`ContextualLinearBandit <numpy_ml.bandits.ContextualLinearBandit>` environments.

        References
        ----------
        .. [*] Li, L., Chu, W., Langford, J., & Schapire, R. (2010). A
           contextual-bandit approach to personalized news article
           recommendation. In *Proceedings of the 19th International Conference
           on World Wide Web*, 661-670.

        Parameters
        ----------
        alpha : float
            A confidence/optimisim parameter affecting the amount of
            exploration. Default is 1.
        """  # noqa
        super().__init__()

        self.alpha = alpha
        # per-arm ridge-regression state: A[a] is D x D, b[a] is length D
        self.A, self.b = [], []
        self.is_initialized = False

    def parameters(self):
        """A dictionary containing the current policy parameters"""
        return {"ev_estimates": self.ev_estimates, "A": self.A, "b": self.b}

    def hyperparameters(self):
        """A dictionary containing the policy hyperparameters"""
        return {
            "id": "LinUCB",
            "alpha": self.alpha,
        }

    def _initialize_params(self, bandit):
        """
        Initialize any policy-specific parameters that depend on information
        from the bandit environment.
        """
        bhp = bandit.hyperparameters
        fstr = "LinUCB only defined for contextual linear bandits, got: {}"
        assert bhp["id"] == "ContextualLinearBandit", fstr.format(bhp["id"])

        # A starts as the identity (ridge regularizer), b as zeros
        self.A, self.b = [], []
        for _ in range(bandit.n_arms):
            self.A.append(np.eye(bandit.D))
            self.b.append(np.zeros(bandit.D))

        self.is_initialized = True

    def _select_arm(self, bandit, context):
        """Select the arm maximizing the upper confidence bound on payoff."""
        probs = []
        for a in range(bandit.n_arms):
            C, A, b = context[:, a], self.A[a], self.b[a]
            A_inv = np.linalg.inv(A)
            theta_hat = A_inv @ b
            # UCB = point estimate + alpha * posterior std of the estimate
            p = theta_hat @ C + self.alpha * np.sqrt(C.T @ A_inv @ C)
            probs.append(p)
        return np.argmax(probs)

    def _update_params(self, arm_id, rwd, context):
        """Update the ridge-regression sufficient statistics A and b.

        BUG FIX: the previous code computed ``x @ x.T`` where ``x`` is the
        1-D context vector; for 1-D arrays ``.T`` is a no-op and ``@`` is an
        *inner* product, so a scalar was broadcast-added to every entry of A.
        LinUCB (Li et al., 2010) requires the rank-one outer-product update
        ``A <- A + x x^T``.
        """
        x = context[:, arm_id]
        self.A[arm_id] += np.outer(x, x)
        self.b[arm_id] += rwd * x

    def _reset_params(self):
        """
        Reset any model-specific parameters. This gets called within the
        public `self.reset()` method.
        """
        self.A, self.b = [], []
        self.ev_estimates = {}
The provided code snippet includes the necessary dependencies for implementing the `plot_lin_ucb` function. Write a Python function `def plot_lin_ucb()` that solves the following problem:
Plot the LinUCB policy on a contextual linear bandit problem.
Here is the function:
def plot_lin_ucb():
    """Plot the linUCB policy on a contextual linear bandit problem"""
    np.random.seed(12345)

    ep_length = 1
    n_arms = np.random.randint(2, 25)   # K: number of arms
    ctx_dim = np.random.randint(2, 10)  # D: context dimensionality
    n_duplicates = 5
    n_episodes = 5000

    cmab = ContextualLinearBandit(n_arms, ctx_dim, 1)
    policy = LinUCB(alpha=1)
    # NOTE(review): BanditTrainer.train's positional parameters are
    # (policy, bandit, n_trials, n_duplicates, plot, ...) -- confirm that
    # passing (ep_length, n_episodes, n_duplicates) positionally is intended.
    policy = BanditTrainer().train(policy, cmab, ep_length, n_episodes, n_duplicates)
18,165 | from collections import namedtuple
import numpy as np
from numpy_ml.bandits import (
MultinomialBandit,
BernoulliBandit,
ShortestPathBandit,
ContextualLinearBandit,
)
from numpy_ml.bandits.trainer import BanditTrainer
from numpy_ml.bandits.policies import (
EpsilonGreedy,
UCB1,
ThompsonSamplingBetaBinomial,
LinUCB,
)
from numpy_ml.utils.graphs import random_DAG, DiGraph, Edge
class BanditTrainer:
    """Facilitates training, comparing, and evaluating bandit policies."""

    def __init__(self):
        """
        An object to facilitate multi-armed bandit training, comparison, and
        evaluation.
        """
        # logs[policy_id][metric][trial_number] -> one value per duplicate run
        self.logs = {}

    def compare(
        self,
        policies,
        bandit,
        n_trials,
        n_duplicates,
        plot=True,
        seed=None,
        smooth_weight=0.999,
        out_dir=None,
    ):
        """
        Compare the performance of multiple policies on the same bandit
        environment, generating a plot for each.

        Parameters
        ----------
        policies : list of :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instances
            The multi-armed bandit policies to compare.
        bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
            The environment to train the policies on.
        n_trials : int
            The number of trials per run.
        n_duplicates: int
            The number of times to evaluate each policy on the bandit
            environment. Larger values permit a better estimate of the
            variance in payoff / cumulative regret for each policy.
        plot : bool
            Whether to generate a plot of the policy's average reward and
            regret across the episodes. Default is True.
        seed : int
            The seed for the random number generator. Default is None.
        smooth_weight : float in [0, 1]
            The smoothing weight. Values closer to 0 result in less smoothing,
            values closer to 1 produce more aggressive smoothing. Default is
            0.999.
        out_dir : str or None
            Plots will be saved to this directory if `plot` is True. If
            `out_dir` is None, plots will not be saved. Default is None.
        """  # noqa: E501
        self.init_logs(policies)

        all_axes = [None] * len(policies)
        if plot and _PLOTTING:
            # one (reward, regret) axis pair per policy
            fig, all_axes = plt.subplots(len(policies), 2, sharex=True)
            fig.set_size_inches(10.5, len(policies) * 5.25)

        for policy, axes in zip(policies, all_axes):
            if seed:
                # reseed per policy so each sees the same randomness
                np.random.seed(seed)

            bandit.reset()
            policy.reset()

            self.train(
                policy,
                bandit,
                n_trials,
                n_duplicates,
                axes=axes,
                plot=plot,
                verbose=False,
                out_dir=out_dir,
                smooth_weight=smooth_weight,
            )

        # enforce the same y-ranges across plots for straightforward comparison
        # NOTE(review): if `plot` is False the `all_axes` entries are None and
        # this unpacking would raise -- confirm compare() requires plotting.
        a1_r, a2_r = zip(*[(a1.get_ylim(), a2.get_ylim()) for (a1, a2) in all_axes])
        a1_min = min(a1_r, key=lambda x: x[0])[0]
        a1_max = max(a1_r, key=lambda x: x[1])[1]
        a2_min = min(a2_r, key=lambda x: x[0])[0]
        a2_max = max(a2_r, key=lambda x: x[1])[1]

        for (a1, a2) in all_axes:
            a1.set_ylim(a1_min, a1_max)
            a2.set_ylim(a2_min, a2_max)

        if plot and _PLOTTING:
            if out_dir is not None:
                plt.savefig(op.join(out_dir, "bandit_comparison.png"), dpi=300)
            plt.show()

    def train(
        self,
        policy,
        bandit,
        n_trials,
        n_duplicates,
        plot=True,
        axes=None,
        verbose=True,
        print_every=100,
        smooth_weight=0.999,
        out_dir=None,
    ):
        """
        Train a MAB policies on a multi-armed bandit problem, logging training
        statistics along the way.

        Parameters
        ----------
        policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
            The multi-armed bandit policy to train.
        bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
            The environment to run the policy on.
        n_trials : int
            The number of trials per run.
        n_duplicates: int
            The number of runs to evaluate
        plot : bool
            Whether to generate a plot of the policy's average reward and
            regret across the episodes. Default is True.
        axes : list of :py:class:`Axis <matplotlib.axes.Axis>` instances or None
            If not None and ``plot = True``, these are the axes that will be
            used to plot the cumulative reward and regret, respectively.
            Default is None.
        verbose : boolean
            Whether to print run statistics during training. Default is True.
        print_every : int
            The number of episodes to run before printing loss values to
            stdout. This is ignored if ``verbose`` is false. Default is 100.
        smooth_weight : float in [0, 1]
            The smoothing weight. Values closer to 0 result in less smoothing,
            values closer to 1 produce more aggressive smoothing. Default is
            0.999.
        out_dir : str or None
            Plots will be saved to this directory if `plot` is True. If
            `out_dir` is None, plots will not be saved. Default is None.

        Returns
        -------
        policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
            The policy trained during the last (i.e. most recent) duplicate
            run.
        """  # noqa: E501
        if not str(policy) in self.logs:
            self.init_logs(policy)

        p = str(policy)
        D, L = n_duplicates, self.logs

        for d in range(D):
            if verbose:
                print("\nDUPLICATE {}/{}\n".format(d + 1, D))

            # start each duplicate run from a clean slate
            bandit.reset()
            policy.reset()

            avg_oracle_reward, cregret = 0, 0
            for trial_id in range(n_trials):
                rwd, arm, orwd, oarm = self._train_step(bandit, policy)

                loss = mse(bandit, policy)
                regret = orwd - rwd

                avg_oracle_reward += orwd
                cregret += regret

                # append this duplicate's per-trial metric values
                L[p]["mse"][trial_id + 1].append(loss)
                L[p]["reward"][trial_id + 1].append(rwd)
                L[p]["regret"][trial_id + 1].append(regret)
                L[p]["cregret"][trial_id + 1].append(cregret)
                L[p]["optimal_arm"][trial_id + 1].append(oarm)
                L[p]["selected_arm"][trial_id + 1].append(arm)
                L[p]["optimal_reward"][trial_id + 1].append(orwd)

                if (trial_id + 1) % print_every == 0 and verbose:
                    fstr = "Trial {}/{}, {}/{}, Regret: {:.4f}"
                    print(fstr.format(trial_id + 1, n_trials, d + 1, D, regret))

            avg_oracle_reward /= n_trials

            if verbose:
                # `regret` carries the final trial's value at this point
                self._print_run_summary(bandit, policy, regret)

        if plot and _PLOTTING:
            self._plot_reward(avg_oracle_reward, policy, smooth_weight, axes, out_dir)

        return policy

    def _train_step(self, bandit, policy):
        """One pull of the bandit: act, then query the oracle payoff."""
        P, B = policy, bandit
        # only contextual bandits define `get_context`
        C = B.get_context() if hasattr(B, "get_context") else None
        rwd, arm = P.act(B, C)
        oracle_rwd, oracle_arm = B.oracle_payoff(C)
        return rwd, arm, oracle_rwd, oracle_arm

    def init_logs(self, policies):
        """
        Initialize the episode logs.

        Notes
        -----
        Training logs are represented as a nested set of dictionaries with the
        following structure:

            log[model_id][metric][trial_number][duplicate_number]

        For example, ``logs['model1']['regret'][3][1]`` holds the regret value
        accrued on the 3rd trial of the 2nd duplicate run for model1.

        Available fields are 'regret', 'cregret' (cumulative regret), 'reward',
        'mse' (mean-squared error between estimated arm EVs and the true EVs),
        'optimal_arm', 'selected_arm', and 'optimal_reward'.
        """
        if not isinstance(policies, list):
            policies = [policies]

        # defaultdicts create per-trial metric lists on first access
        self.logs = {
            str(p): {
                "mse": defaultdict(lambda: []),
                "regret": defaultdict(lambda: []),
                "reward": defaultdict(lambda: []),
                "cregret": defaultdict(lambda: []),
                "optimal_arm": defaultdict(lambda: []),
                "selected_arm": defaultdict(lambda: []),
                "optimal_reward": defaultdict(lambda: []),
            }
            for p in policies
        }

    def _print_run_summary(self, bandit, policy, regret):
        """Print estimated vs. true arm EVs along with final MSE and regret."""
        # skip policies with no EV estimates to report
        if not hasattr(policy, "ev_estimates") or len(policy.ev_estimates) == 0:
            return None

        evs, se = bandit.arm_evs, []
        fstr = "Arm {}: {:.4f} v. {:.4f}"
        ests = sorted(policy.ev_estimates.items(), key=lambda x: x[0])
        print("\n\nEstimated vs. Real EV\n" + "-" * 21)
        for ix, (est, ev) in enumerate(zip(ests, evs)):
            print(fstr.format(ix + 1, est[1], ev))
            se.append((est[1] - ev) ** 2)
        fstr = "\nFinal MSE: {:.4f}\nFinal Regret: {:.4f}\n\n"
        print(fstr.format(np.mean(se), regret))

    def _plot_reward(self, optimal_rwd, policy, smooth_weight, axes=None, out_dir=None):
        """Plot the smoothed average reward and cumulative regret curves."""
        L = self.logs[str(policy)]
        smds = self._smoothed_metrics(policy, optimal_rwd, smooth_weight)

        if axes is None:
            fig, [ax1, ax2] = plt.subplots(1, 2)
        else:
            assert len(axes) == 2
            ax1, ax2 = axes

        e_ids = range(1, len(L["reward"]) + 1)
        plot_params = [[ax1, ax2], ["reward", "cregret"], ["b", "r"], [optimal_rwd, 0]]

        for (ax, m, c, opt) in zip(*plot_params):
            avg, std = "sm_{}_avg sm_{}_std".format(m, m).split()
            ax.plot(e_ids, smds[avg], color=c)
            # dashed horizontal line marks the oracle optimum
            ax.axhline(opt, 0, 1, color=c, ls="--")
            ax.fill_between(
                e_ids,
                smds[avg] + smds[std],
                smds[avg] - smds[std],
                color=c,
                alpha=0.25,
            )
            ax.set_xlabel("Trial")
            m = "Cumulative Regret" if m == "cregret" else m
            ax.set_ylabel("Smoothed Avg. {}".format(m.title()))

            if axes is None:
                ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))

        if axes is not None:
            ax.set_title(str(policy))
        if axes is None:
            fig.suptitle(str(policy))
            fig.tight_layout()

        if out_dir is not None:
            # NOTE(review): subscripting `policy.hyperparameters` assumes a
            # property/dict, not a plain method -- confirm.
            bid = policy.hyperparameters["id"]
            plt.savefig(op.join(out_dir, f"{bid}.png"), dpi=300)
        plt.show()
        return ax1, ax2

    def _smoothed_metrics(self, policy, optimal_rwd, smooth_weight):
        """Exponentially smooth each logged metric across trials.

        NOTE(review): `optimal_rwd` is never used here and the 'selections'
        key that is skipped is never created by `init_logs` -- confirm
        whether both are leftovers.
        """
        L = self.logs[str(policy)]

        # pre-allocate smoothed data structure
        smds = {}
        for m in L.keys():
            if m == "selections":
                continue

            smds["sm_{}_avg".format(m)] = np.zeros(len(L["reward"]))
            smds["sm_{}_avg".format(m)][0] = np.mean(L[m][1])

            smds["sm_{}_std".format(m)] = np.zeros(len(L["reward"]))
            smds["sm_{}_std".format(m)][0] = np.std(L[m][1])

        smoothed = {m: L[m][1] for m in L.keys()}
        for e_id in range(2, len(L["reward"]) + 1):
            for m in L.keys():
                if m == "selections":
                    continue

                prev, cur = smoothed[m], L[m][e_id]
                smoothed[m] = [smooth(p, c, smooth_weight) for p, c in zip(prev, cur)]
                smds["sm_{}_avg".format(m)][e_id - 1] = np.mean(smoothed[m])
                smds["sm_{}_std".format(m)][e_id - 1] = np.std(smoothed[m])
        return smds
class UCB1(BanditPolicyBase):
    def __init__(self, C=1, ev_prior=0.5):
        r"""
        A UCB1 policy for multi-armed bandit problems.

        Notes
        -----
        The UCB1 algorithm [*]_ guarantees the cumulative regret is bounded by log
        `t`, where `t` is the current timestep. To make this guarantee UCB1
        assumes all arm payoffs are between 0 and 1.

        Under UCB1, the upper confidence bound on the expected value for
        pulling arm `a` at timestep `t` is:

        .. math::

            \text{UCB}(a, t) = \text{EV}_t(a) + C \sqrt{\frac{2 \log t}{N_t(a)}}

        where :math:`\text{EV}_t(a)` is the average of the rewards recieved so
        far from pulling arm `a`, `C` is a free parameter controlling the
        "optimism" of the confidence upper bound for :math:`\text{UCB}(a, t)`
        (for logarithmic regret bounds, `C` must equal 1), and :math:`N_t(a)`
        is the number of times arm `a` has been pulled during the previous `t -
        1` timesteps.

        References
        ----------
        .. [*] Auer, P., Cesa-Bianchi, N., & Fischer, P. (2002). Finite-time
           analysis of the multiarmed bandit problem. *Machine Learning,
           47(2)*.

        Parameters
        ----------
        C : float in (0, +infinity)
            A confidence/optimisim parameter affecting the degree of
            exploration, where larger values encourage greater exploration. The
            UCB1 algorithm assumes `C=1`. Default is 1.
        ev_prior : float
            The starting expected value for each arm before any data has been
            observed. Default is 0.5.
        """
        self.C = C
        self.ev_prior = ev_prior
        super().__init__()

    def parameters(self):
        """A dictionary containing the current policy parameters"""
        return {"ev_estimates": self.ev_estimates}

    def hyperparameters(self):
        """A dictionary containing the policy hyperparameters"""
        return {
            "C": self.C,
            "id": "UCB1",
            "ev_prior": self.ev_prior,
        }

    def _initialize_params(self, bandit):
        """
        Initialize any policy-specific parameters that depend on information
        from the bandit environment.
        """
        # each arm starts from the same prior expected value
        self.ev_estimates = {i: self.ev_prior for i in range(bandit.n_arms)}
        self.is_initialized = True

    def _select_arm(self, bandit, context=None):
        """Choose the arm with the largest upper confidence bound."""
        # add eps to avoid divide-by-zero errors on the first pull of each arm
        eps = np.finfo(float).eps
        N, T = bandit.n_arms, self.step + 1
        E, C = self.ev_estimates, self.pull_counts
        # NOTE(review): the class docstring's bound has a factor of 2 inside
        # the sqrt (2 log t / N_t(a)), but the code computes
        # sqrt(log T / N_t(a)). Confirm which form is intended.
        scores = [E[a] + self.C * np.sqrt(np.log(T) / (C[a] + eps)) for a in range(N)]
        return np.argmax(scores)

    def _update_params(self, arm_id, reward, context=None):
        """Incrementally update the running mean reward for `arm_id`."""
        E, C = self.ev_estimates, self.pull_counts
        C[arm_id] += 1
        # online mean update: E_new = E_old + (r - E_old) / n
        E[arm_id] += (reward - E[arm_id]) / (C[arm_id])

    def _reset_params(self):
        """
        Reset any model-specific parameters. This gets called within the
        public :method:`reset` method.
        """
        self.ev_estimates = {}
        self.pull_counts = defaultdict(lambda: 0)
class Edge(object):
    def __init__(self, fr, to, w=None):
        """
        A generic directed edge object.

        Parameters
        ----------
        fr: int
            The id of the vertex the edge goes from
        to: int
            The id of the vertex the edge goes to
        w: float, :class:`Object` instance, or None
            The edge weight, if applicable. If weight is an arbitrary Object it
            must have a method called 'sample' which takes no arguments and
            returns a random sample from the weight distribution. If `w` is
            None, no weight is assumed. Default is None.
        """
        self.fr = fr
        self.to = to
        self._w = w

    def __repr__(self):
        return "{} -> {}, weight: {}".format(self.fr, self.to, self._w)

    def weight(self):
        """Return the edge weight, sampling from it if it is a distribution."""
        return self._w.sample() if hasattr(self._w, "sample") else self._w

    def reverse(self):
        """Reverse the edge direction.

        BUG FIX: the previous implementation referenced the nonexistent
        attributes ``self.t``, ``self.f``, and ``self.w`` (the real attribute
        names are ``to``, ``fr``, and ``_w``), so calling `reverse` always
        raised AttributeError.
        """
        return Edge(self.to, self.fr, self._w)
class DiGraph(Graph):
    def __init__(self, V, E):
        """
        A generic directed graph object.

        Parameters
        ----------
        V : list
            A list of vertex IDs.
        E : list of :class:`Edge <numpy_ml.utils.graphs.Edge>` objects
            A list of directed edges connecting pairs of vertices in ``V``.
        """
        super().__init__(V, E)
        self.is_directed = True
        # NOTE(review): this cache is never written to --
        # `topological_ordering` recomputes from scratch each call. Confirm
        # whether it was meant to be populated.
        self._topological_ordering = []

    def _build_adjacency_list(self):
        """Encode directed graph as an adjancency list"""
        # assumes no parallel edges
        for e in self.edges:
            fr_i = self._V2I[e.fr]
            self._G[fr_i].add(e)

    def reverse(self):
        """Reverse the direction of all edges in the graph"""
        return DiGraph(self.vertices, [e.reverse() for e in self.edges])

    def topological_ordering(self):
        """
        Returns a (non-unique) topological sort / linearization of the nodes
        IFF the graph is acyclic, otherwise returns None.

        Notes
        -----
        A topological sort is an ordering on the nodes in `G` such that for every
        directed edge :math:`u \\rightarrow v` in the graph, `u` appears before
        `v` in the ordering. The topological ordering is produced by ordering
        the nodes in `G` by their DFS "last visit time," from greatest to
        smallest.

        This implementation follows a recursive, DFS-based approach [1]_ which
        may break if the graph is very large. For an iterative version, see
        Khan's algorithm [2]_ .

        References
        ----------
        .. [1] Tarjan, R. (1976), Edge-disjoint spanning trees and depth-first
           search, *Acta Informatica, 6 (2)*: 171–185.
        .. [2] Kahn, A. (1962), Topological sorting of large networks,
           *Communications of the ACM, 5 (11)*: 558–562.

        Returns
        -------
        ordering : list or None
            A topoligical ordering of the vertex indices if the graph is a DAG,
            otherwise None.
        """
        ordering = []
        visited = set()

        def dfs(v_i, path=None):
            """A simple DFS helper routine"""
            # `path` holds the vertices on the current DFS stack; revisiting
            # one of them means we found a back-edge, i.e., a cycle
            path = set([v_i]) if path is None else path
            for nbr_i in self.get_neighbors(v_i):
                if nbr_i in path:
                    return True  # cycle detected!
                elif nbr_i not in visited:
                    visited.add(nbr_i)
                    path.add(nbr_i)
                    is_cyclic = dfs(nbr_i, path)
                    if is_cyclic:
                        return True

            # insert to the beginning of the ordering
            # (vertices finish in reverse topological order)
            ordering.insert(0, v_i)
            path -= set([v_i])
            return False

        # run DFS from every unvisited vertex so disconnected components
        # are covered
        for s_i in self.indices:
            if s_i not in visited:
                visited.add(s_i)
                is_cyclic = dfs(s_i)
                if is_cyclic:
                    return None

        return ordering

    def is_acyclic(self):
        """Check whether the graph contains cycles"""
        return self.topological_ordering() is not None
def random_DAG(n_vertices, edge_prob=0.5):
    """
    Create a 'random' unweighted directed acyclic graph by pruning all the
    backward connections from a random graph.

    Parameters
    ----------
    n_vertices : int
        The number of vertices in the graph.
    edge_prob : float in [0, 1]
        The probability of forming an edge between two vertices in the
        underlying random graph, before edge pruning. Default is 0.5.

    Returns
    -------
    G : :class:`Graph` instance
        The resulting DAG.
    """
    # Regenerate until pruning leaves at least one edge.
    while True:
        base = random_unweighted_graph(n_vertices, edge_prob, directed=True)
        # keeping only "forward" edges (fr < to) guarantees acyclicity
        forward_edges = [e for e in base.edges if e.fr < e.to]
        G = DiGraph(base.vertices, forward_edges)
        if len(G.edges):
            return G
The provided code snippet includes the necessary dependencies for implementing the `plot_ucb1_gaussian_shortest_path` function. Write a Python function `def plot_ucb1_gaussian_shortest_path()` that solves the following problem:
Plot the UCB1 policy on a graph shortest-path problem, with each edge weight drawn from an independent univariate Gaussian.
Here is the function:
def plot_ucb1_gaussian_shortest_path():
    """
    Plot the UCB1 policy on a graph shortest-path problem, with each edge
    weight drawn from an independent univariate Gaussian.
    """
    np.random.seed(12345)

    ep_length = 1
    n_duplicates = 5
    n_episodes = 5000
    p = np.random.rand()
    n_vertices = np.random.randint(5, 15)

    Gaussian = namedtuple("Gaussian", ["mean", "variance", "EV", "sample"])

    # create randomly-weighted edges
    print("Building graph")
    E = []
    G = random_DAG(n_vertices, p)
    V = G.vertices
    for e in G.edges:
        mean, var = np.random.uniform(0, 1), np.random.uniform(0, 1)
        # BUG FIX: binding `mean` and `var` as default arguments avoids the
        # late-binding closure pitfall -- previously every edge's sampler
        # captured the *loop variables* and therefore sampled from the last
        # edge's distribution after the loop finished.
        # NOTE(review): np.random.normal's second argument is a standard
        # deviation, yet this value is named `var` -- confirm intent.
        w = lambda mean=mean, var=var: np.random.normal(mean, var)  # noqa: E731
        rv = Gaussian(mean, var, mean, w)
        E.append(Edge(e.fr, e.to, rv))

    G = DiGraph(V, E)
    # ensure a start -> end path exists by swapping the terminal vertex
    while not G.path_exists(V[0], V[-1]):
        print("Skipping")
        idx = np.random.randint(0, len(V))
        V[idx], V[-1] = V[-1], V[idx]

    mab = ShortestPathBandit(G, V[0], V[-1])
    policy = UCB1(C=1, ev_prior=0.5)
    policy = BanditTrainer().train(policy, mab, ep_length, n_episodes, n_duplicates)
18,166 | from collections import namedtuple
import numpy as np
from numpy_ml.bandits import (
MultinomialBandit,
BernoulliBandit,
ShortestPathBandit,
ContextualLinearBandit,
)
from numpy_ml.bandits.trainer import BanditTrainer
from numpy_ml.bandits.policies import (
EpsilonGreedy,
UCB1,
ThompsonSamplingBetaBinomial,
LinUCB,
)
from numpy_ml.utils.graphs import random_DAG, DiGraph, Edge
def random_bernoulli_mab(n_arms=10):
    """Generate a random Bernoulli multi-armed bandit environment."""
    raw = np.random.uniform(size=n_arms)
    # normalize so the arm payoff probabilities sum to 1
    return BernoulliBandit(raw / raw.sum())
class BanditTrainer:
def __init__(self):
"""
An object to facilitate multi-armed bandit training, comparison, and
evaluation.
"""
self.logs = {}
def compare(
self,
policies,
bandit,
n_trials,
n_duplicates,
plot=True,
seed=None,
smooth_weight=0.999,
out_dir=None,
):
"""
Compare the performance of multiple policies on the same bandit
environment, generating a plot for each.
Parameters
----------
policies : list of :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instances
The multi-armed bandit policies to compare.
bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
The environment to train the policies on.
n_trials : int
The number of trials per run.
n_duplicates: int
The number of times to evaluate each policy on the bandit
environment. Larger values permit a better estimate of the
variance in payoff / cumulative regret for each policy.
plot : bool
Whether to generate a plot of the policy's average reward and
regret across the episodes. Default is True.
seed : int
The seed for the random number generator. Default is None.
smooth_weight : float in [0, 1]
The smoothing weight. Values closer to 0 result in less smoothing,
values closer to 1 produce more aggressive smoothing. Default is
0.999.
out_dir : str or None
Plots will be saved to this directory if `plot` is True. If
`out_dir` is None, plots will not be saved. Default is None.
""" # noqa: E501
self.init_logs(policies)
all_axes = [None] * len(policies)
if plot and _PLOTTING:
fig, all_axes = plt.subplots(len(policies), 2, sharex=True)
fig.set_size_inches(10.5, len(policies) * 5.25)
for policy, axes in zip(policies, all_axes):
if seed:
np.random.seed(seed)
bandit.reset()
policy.reset()
self.train(
policy,
bandit,
n_trials,
n_duplicates,
axes=axes,
plot=plot,
verbose=False,
out_dir=out_dir,
smooth_weight=smooth_weight,
)
# enforce the same y-ranges across plots for straightforward comparison
a1_r, a2_r = zip(*[(a1.get_ylim(), a2.get_ylim()) for (a1, a2) in all_axes])
a1_min = min(a1_r, key=lambda x: x[0])[0]
a1_max = max(a1_r, key=lambda x: x[1])[1]
a2_min = min(a2_r, key=lambda x: x[0])[0]
a2_max = max(a2_r, key=lambda x: x[1])[1]
for (a1, a2) in all_axes:
a1.set_ylim(a1_min, a1_max)
a2.set_ylim(a2_min, a2_max)
if plot and _PLOTTING:
if out_dir is not None:
plt.savefig(op.join(out_dir, "bandit_comparison.png"), dpi=300)
plt.show()
def train(
    self,
    policy,
    bandit,
    n_trials,
    n_duplicates,
    plot=True,
    axes=None,
    verbose=True,
    print_every=100,
    smooth_weight=0.999,
    out_dir=None,
):
    """
    Train a MAB policy on a multi-armed bandit problem, logging training
    statistics along the way.

    Parameters
    ----------
    policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
        The multi-armed bandit policy to train.
    bandit : :class:`Bandit <numpy_ml.bandits.bandits.Bandit>` instance
        The environment to run the policy on.
    n_trials : int
        The number of trials per run.
    n_duplicates: int
        The number of runs to evaluate
    plot : bool
        Whether to generate a plot of the policy's average reward and
        regret across the episodes. Default is True.
    axes : list of :py:class:`Axis <matplotlib.axes.Axis>` instances or None
        If not None and ``plot = True``, these are the axes that will be
        used to plot the cumulative reward and regret, respectively.
        Default is None.
    verbose : boolean
        Whether to print run statistics during training. Default is True.
    print_every : int
        The number of episodes to run before printing loss values to
        stdout. This is ignored if ``verbose`` is false. Default is 100.
    smooth_weight : float in [0, 1]
        The smoothing weight. Values closer to 0 result in less smoothing,
        values closer to 1 produce more aggressive smoothing. Default is
        0.999.
    out_dir : str or None
        Plots will be saved to this directory if `plot` is True. If
        `out_dir` is None, plots will not be saved. Default is None.

    Returns
    -------
    policy : :class:`BanditPolicyBase <numpy_ml.bandits.policies.BanditPolicyBase>` instance
        The policy trained during the last (i.e. most recent) duplicate
        run.
    """  # noqa: E501
    # lazily create log entries if this policy hasn't been registered yet
    if not str(policy) in self.logs:
        self.init_logs(policy)

    # logs are keyed on the policy's string representation
    p = str(policy)
    D, L = n_duplicates, self.logs

    for d in range(D):
        if verbose:
            print("\nDUPLICATE {}/{}\n".format(d + 1, D))

        # start each duplicate run from a clean environment / policy state
        bandit.reset()
        policy.reset()

        avg_oracle_reward, cregret = 0, 0
        for trial_id in range(n_trials):
            # one interaction: policy's pull + the oracle's best-arm payoff
            rwd, arm, orwd, oarm = self._train_step(bandit, policy)
            loss = mse(bandit, policy)

            # per-trial regret is the gap to the oracle payoff
            regret = orwd - rwd
            avg_oracle_reward += orwd
            cregret += regret

            # log values under trial number `trial_id + 1` (1-indexed);
            # each entry accumulates one value per duplicate run
            L[p]["mse"][trial_id + 1].append(loss)
            L[p]["reward"][trial_id + 1].append(rwd)
            L[p]["regret"][trial_id + 1].append(regret)
            L[p]["cregret"][trial_id + 1].append(cregret)
            L[p]["optimal_arm"][trial_id + 1].append(oarm)
            L[p]["selected_arm"][trial_id + 1].append(arm)
            L[p]["optimal_reward"][trial_id + 1].append(orwd)

            if (trial_id + 1) % print_every == 0 and verbose:
                fstr = "Trial {}/{}, {}/{}, Regret: {:.4f}"
                print(fstr.format(trial_id + 1, n_trials, d + 1, D, regret))

        avg_oracle_reward /= n_trials

        if verbose:
            self._print_run_summary(bandit, policy, regret)

    # _PLOTTING is False when matplotlib is unavailable
    if plot and _PLOTTING:
        self._plot_reward(avg_oracle_reward, policy, smooth_weight, axes, out_dir)

    return policy
def _train_step(self, bandit, policy):
    """Run a single interaction and return (reward, arm, oracle reward, oracle arm)."""
    # contextual bandits expose a per-step context; plain bandits do not
    context = bandit.get_context() if hasattr(bandit, "get_context") else None
    reward, arm_id = policy.act(bandit, context)
    best_reward, best_arm = bandit.oracle_payoff(context)
    return reward, arm_id, best_reward, best_arm
def init_logs(self, policies):
    """
    Initialize the episode logs.

    Notes
    -----
    Training logs are represented as a nested set of dictionaries with the
    following structure:

        log[model_id][metric][trial_number][duplicate_number]

    For example, ``logs['model1']['regret'][3][1]`` holds the regret value
    accrued on the 3rd trial of the 2nd duplicate run for model1.

    Available fields are 'regret', 'cregret' (cumulative regret), 'reward',
    'mse' (mean-squared error between estimated arm EVs and the true EVs),
    'optimal_arm', 'selected_arm', and 'optimal_reward'.
    """
    # accept either a single policy or a list of policies
    if not isinstance(policies, list):
        policies = [policies]

    metrics = (
        "mse",
        "regret",
        "reward",
        "cregret",
        "optimal_arm",
        "selected_arm",
        "optimal_reward",
    )
    # each metric maps a trial number to a list of per-duplicate values
    self.logs = {str(p): {m: defaultdict(list) for m in metrics} for p in policies}
def _print_run_summary(self, bandit, policy, regret):
    """Print estimated vs. true per-arm EVs plus final MSE / regret to stdout."""
    # policies without EV estimates have nothing to summarize
    if not hasattr(policy, "ev_estimates") or len(policy.ev_estimates) == 0:
        return None

    evs, se = bandit.arm_evs, []
    fstr = "Arm {}: {:.4f} v. {:.4f}"
    # sort (arm_id, estimate) pairs by arm id so they align with bandit.arm_evs
    ests = sorted(policy.ev_estimates.items(), key=lambda x: x[0])
    print("\n\nEstimated vs. Real EV\n" + "-" * 21)
    for ix, (est, ev) in enumerate(zip(ests, evs)):
        # est is an (arm_id, estimate) tuple; est[1] is the estimated EV
        print(fstr.format(ix + 1, est[1], ev))
        se.append((est[1] - ev) ** 2)
    fstr = "\nFinal MSE: {:.4f}\nFinal Regret: {:.4f}\n\n"
    print(fstr.format(np.mean(se), regret))
def _plot_reward(self, optimal_rwd, policy, smooth_weight, axes=None, out_dir=None):
    """Plot smoothed average reward and cumulative regret for `policy`."""
    L = self.logs[str(policy)]
    smds = self._smoothed_metrics(policy, optimal_rwd, smooth_weight)

    # create a fresh figure unless the caller supplied (reward, regret) axes
    if axes is None:
        fig, [ax1, ax2] = plt.subplots(1, 2)
    else:
        assert len(axes) == 2
        ax1, ax2 = axes

    e_ids = range(1, len(L["reward"]) + 1)
    plot_params = [[ax1, ax2], ["reward", "cregret"], ["b", "r"], [optimal_rwd, 0]]

    for (ax, m, c, opt) in zip(*plot_params):
        avg, std = "sm_{}_avg sm_{}_std".format(m, m).split()
        ax.plot(e_ids, smds[avg], color=c)
        # dashed line marks the optimum (avg. oracle reward / zero regret)
        ax.axhline(opt, 0, 1, color=c, ls="--")
        # shade +/- one standard deviation around the smoothed mean
        ax.fill_between(
            e_ids,
            smds[avg] + smds[std],
            smds[avg] - smds[std],
            color=c,
            alpha=0.25,
        )
        ax.set_xlabel("Trial")
        m = "Cumulative Regret" if m == "cregret" else m
        ax.set_ylabel("Smoothed Avg. {}".format(m.title()))

        if axes is None:
            ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))

        if axes is not None:
            ax.set_title(str(policy))

    if axes is None:
        fig.suptitle(str(policy))
        fig.tight_layout()

    if out_dir is not None:
        # NOTE(review): assumes `policy.hyperparameters` is a dict-like
        # attribute (i.e., a property) exposing an "id" key — confirm
        bid = policy.hyperparameters["id"]
        plt.savefig(op.join(out_dir, f"{bid}.png"), dpi=300)
    plt.show()
    return ax1, ax2
def _smoothed_metrics(self, policy, optimal_rwd, smooth_weight):
    """
    Exponentially smooth each logged metric across trials, returning the
    per-trial mean and standard deviation taken over the duplicate runs.
    """
    L = self.logs[str(policy)]

    # pre-allocate smoothed data structure
    smds = {}
    for m in L.keys():
        # NOTE(review): "selections" is never a key produced by `init_logs`
        # ("selected_arm" is) — this guard appears defensive/vestigial; confirm
        if m == "selections":
            continue

        smds["sm_{}_avg".format(m)] = np.zeros(len(L["reward"]))
        smds["sm_{}_avg".format(m)][0] = np.mean(L[m][1])

        smds["sm_{}_std".format(m)] = np.zeros(len(L["reward"]))
        smds["sm_{}_std".format(m)][0] = np.std(L[m][1])

    # running smoothed value per duplicate, seeded with trial 1's raw values
    smoothed = {m: L[m][1] for m in L.keys()}
    for e_id in range(2, len(L["reward"]) + 1):
        for m in L.keys():
            if m == "selections":
                continue
            # smooth each duplicate's trajectory independently, then
            # aggregate across duplicates for this trial
            prev, cur = smoothed[m], L[m][e_id]
            smoothed[m] = [smooth(p, c, smooth_weight) for p, c in zip(prev, cur)]
            smds["sm_{}_avg".format(m)][e_id - 1] = np.mean(smoothed[m])
            smds["sm_{}_std".format(m)][e_id - 1] = np.std(smoothed[m])
    return smds
class EpsilonGreedy(BanditPolicyBase):
    def __init__(self, epsilon=0.05, ev_prior=0.5):
        r"""
        An epsilon-greedy policy for multi-armed bandit problems.

        Notes
        -----
        Epsilon-greedy policies greedily select the arm with the highest
        expected payoff with probability :math:`1-\epsilon`, and selects an arm
        uniformly at random with probability :math:`\epsilon`:

        .. math::
            P(a) = \left\{
                 \begin{array}{lr}
                   \epsilon / N + (1 - \epsilon) &\text{if }
                     a = \arg \max_{a' \in \mathcal{A}}
                        \mathbb{E}_{q_{\hat{\theta}}}[r \mid a']\\
                   \epsilon / N &\text{otherwise}
                 \end{array}
               \right.

        where :math:`N = |\mathcal{A}|` is the number of arms,
        :math:`q_{\hat{\theta}}` is the estimate of the arm payoff
        distribution under current model parameters :math:`\hat{\theta}`, and
        :math:`\mathbb{E}_{q_{\hat{\theta}}}[r \mid a']` is the expected
        reward under :math:`q_{\hat{\theta}}` of receiving reward `r` after
        taking action :math:`a'`.

        Parameters
        ----------
        epsilon : float in [0, 1]
            The probability of taking a random action. Default is 0.05.
        ev_prior : float
            The starting expected payoff for each arm before any data has been
            observed. Default is 0.5.
        """
        super().__init__()
        self.epsilon = epsilon
        self.ev_prior = ev_prior
        # number of times each arm has been pulled so far
        self.pull_counts = defaultdict(lambda: 0)

    # `parameters` and `hyperparameters` are accessed as attributes elsewhere
    # in this module (e.g., ``policy.hyperparameters["id"]`` in the trainer),
    # so they must be properties rather than plain methods.
    @property
    def parameters(self):
        """A dictionary containing the current policy parameters"""
        return {"ev_estimates": self.ev_estimates}

    @property
    def hyperparameters(self):
        """A dictionary containing the policy hyperparameters"""
        return {
            "id": "EpsilonGreedy",
            "epsilon": self.epsilon,
            "ev_prior": self.ev_prior,
        }

    def _initialize_params(self, bandit):
        """
        Initialize any policy-specific parameters that depend on information
        from the bandit environment.
        """
        # start every arm at the same prior EV
        self.ev_estimates = {i: self.ev_prior for i in range(bandit.n_arms)}
        self.is_initialized = True

    def _select_arm(self, bandit, context=None):
        # explore uniformly at random w.p. epsilon; otherwise exploit the
        # arm with the highest current EV estimate
        if np.random.rand() < self.epsilon:
            arm_id = np.random.choice(bandit.n_arms)
        else:
            ests = self.ev_estimates
            (arm_id, _) = max(ests.items(), key=lambda x: x[1])
        return arm_id

    def _update_params(self, arm_id, reward, context=None):
        # incremental (running) average of the rewards sampled for this arm
        E, C = self.ev_estimates, self.pull_counts
        C[arm_id] += 1
        E[arm_id] += (reward - E[arm_id]) / (C[arm_id])

    def _reset_params(self):
        """
        Reset any model-specific parameters. This gets called within the
        public `self.reset()` method.
        """
        self.ev_estimates = {}
        self.pull_counts = defaultdict(lambda: 0)
class UCB1(BanditPolicyBase):
    def __init__(self, C=1, ev_prior=0.5):
        r"""
        A UCB1 policy for multi-armed bandit problems.

        Notes
        -----
        The UCB1 algorithm [*]_ guarantees the cumulative regret is bounded by log
        `t`, where `t` is the current timestep. To make this guarantee UCB1
        assumes all arm payoffs are between 0 and 1.

        Under UCB1, the upper confidence bound on the expected value for
        pulling arm `a` at timestep `t` is:

        .. math::
            \text{UCB}(a, t) = \text{EV}_t(a) + C \sqrt{\frac{2 \log t}{N_t(a)}}

        where :math:`\text{EV}_t(a)` is the average of the rewards received so
        far from pulling arm `a`, `C` is a free parameter controlling the
        "optimism" of the confidence upper bound for :math:`\text{UCB}(a, t)`
        (for logarithmic regret bounds, `C` must equal 1), and :math:`N_t(a)`
        is the number of times arm `a` has been pulled during the previous `t -
        1` timesteps.

        References
        ----------
        .. [*] Auer, P., Cesa-Bianchi, N., & Fischer, P. (2002). Finite-time
           analysis of the multiarmed bandit problem. *Machine Learning,
           47(2)*.

        Parameters
        ----------
        C : float in (0, +infinity)
            A confidence/optimism parameter affecting the degree of
            exploration, where larger values encourage greater exploration. The
            UCB1 algorithm assumes `C=1`. Default is 1.
        ev_prior : float
            The starting expected value for each arm before any data has been
            observed. Default is 0.5.
        """
        self.C = C
        self.ev_prior = ev_prior
        super().__init__()
        # number of times each arm has been pulled; previously only created in
        # `_reset_params`, leaving a fresh instance without the attribute
        # (EpsilonGreedy initializes it in `__init__` — keep them consistent)
        self.pull_counts = defaultdict(lambda: 0)

    # `parameters` and `hyperparameters` are accessed as attributes elsewhere
    # in this module (e.g., ``policy.hyperparameters["id"]`` in the trainer),
    # so they must be properties rather than plain methods.
    @property
    def parameters(self):
        """A dictionary containing the current policy parameters"""
        return {"ev_estimates": self.ev_estimates}

    @property
    def hyperparameters(self):
        """A dictionary containing the policy hyperparameters"""
        return {
            "C": self.C,
            "id": "UCB1",
            "ev_prior": self.ev_prior,
        }

    def _initialize_params(self, bandit):
        """
        Initialize any policy-specific parameters that depend on information
        from the bandit environment.
        """
        self.ev_estimates = {i: self.ev_prior for i in range(bandit.n_arms)}
        self.is_initialized = True

    def _select_arm(self, bandit, context=None):
        # add eps to avoid divide-by-zero errors on the first pull of each arm
        eps = np.finfo(float).eps
        # NOTE: `self.step` is maintained by the BanditPolicyBase superclass
        N, T = bandit.n_arms, self.step + 1
        E, C = self.ev_estimates, self.pull_counts
        scores = [E[a] + self.C * np.sqrt(np.log(T) / (C[a] + eps)) for a in range(N)]
        return np.argmax(scores)

    def _update_params(self, arm_id, reward, context=None):
        # incremental (running) average of the rewards sampled for this arm
        E, C = self.ev_estimates, self.pull_counts
        C[arm_id] += 1
        E[arm_id] += (reward - E[arm_id]) / (C[arm_id])

    def _reset_params(self):
        """
        Reset any model-specific parameters. This gets called within the
        public :method:`reset` method.
        """
        self.ev_estimates = {}
        self.pull_counts = defaultdict(lambda: 0)
class ThompsonSamplingBetaBinomial(BanditPolicyBase):
    def __init__(self, alpha=1, beta=1):
        r"""
        A conjugate Thompson sampling [1]_ [2]_ policy for multi-armed bandits with
        Bernoulli likelihoods.

        Notes
        -----
        The policy assumes independent Beta priors on the Bernoulli arm payoff
        probabilities, :math:`\theta`:

        .. math::
            \theta_k \sim \text{Beta}(\alpha_k, \beta_k) \\
            r \mid \theta_k \sim \text{Bernoulli}(\theta_k)

        where :math:`k \in \{1,\ldots,K \}` indexes arms in the MAB and
        :math:`\theta_k` is the parameter of the Bernoulli likelihood for arm
        `k`. The sampler begins by selecting an arm with probability
        proportional to its payoff probability under the initial Beta prior.
        After pulling the sampled arm and receiving a reward, `r`, the sampler
        computes the posterior over the model parameters (arm payoffs) via
        Bayes' rule, and then samples a new action in proportion to its payoff
        probability under this posterior. This process (i.e., sample action
        from posterior, take action and receive reward, compute updated
        posterior) is repeated until the number of trials is exhausted.

        Note that due to the conjugacy between the Beta prior and Bernoulli
        likelihood the posterior for each arm will also be Beta-distributed and
        can be computed and sampled from efficiently:

        .. math::
            \theta_k \mid r \sim \text{Beta}(\alpha_k + r, \beta_k + 1 - r)

        References
        ----------
        .. [1] Thompson, W. (1933). On the likelihood that one unknown
           probability exceeds another in view of the evidence of two samples.
           *Biometrika, 25(3/4)*, 285-294.
        .. [2] Chapelle, O., & Li, L. (2011). An empirical evaluation of
           Thompson sampling. *Advances in Neural Information Processing
           Systems, 24*, 2249-2257.

        Parameters
        ----------
        alpha : float or list of length `K`
            Parameter for the Beta prior on arm payouts. If a float, this value
            will be used in the prior for all of the `K` arms.
        beta : float or list of length `K`
            Parameter for the Beta prior on arm payouts. If a float, this value
            will be used in the prior for all of the `K` arms.
        """
        super().__init__()
        self.alphas, self.betas = [], []
        self.alpha, self.beta = alpha, beta
        self.is_initialized = False

    # `parameters` and `hyperparameters` are accessed as attributes elsewhere
    # in this module (e.g., ``bhp["id"]`` via ``bandit.hyperparameters``), so
    # they must be properties rather than plain methods.
    @property
    def parameters(self):
        """A dictionary containing the current policy parameters"""
        return {
            "ev_estimates": self.ev_estimates,
            "alphas": self.alphas,
            "betas": self.betas,
        }

    @property
    def hyperparameters(self):
        """A dictionary containing the policy hyperparameters"""
        return {
            "id": "ThompsonSamplingBetaBinomial",
            "alpha": self.alpha,
            "beta": self.beta,
        }

    def _initialize_params(self, bandit):
        bhp = bandit.hyperparameters
        fstr = "ThompsonSamplingBetaBinomial only defined for BernoulliBandit, got: {}"
        assert bhp["id"] == "BernoulliBandit", fstr.format(bhp["id"])

        # initialize the model prior; scalar hyperparameters are broadcast
        # across all arms
        if is_number(self.alpha):
            self.alphas = [self.alpha] * bandit.n_arms
        if is_number(self.beta):
            self.betas = [self.beta] * bandit.n_arms
        assert len(self.alphas) == len(self.betas) == bandit.n_arms

        self.ev_estimates = {i: self._map_estimate(i, 1) for i in range(bandit.n_arms)}
        self.is_initialized = True

    def _select_arm(self, bandit, context):
        if not self.is_initialized:
            # fix: this previously called the nonexistent `_initialize_prior`,
            # raising AttributeError on an uninitialized policy
            self._initialize_params(bandit)

        # draw a sample from the current model posterior
        posterior_sample = np.random.beta(self.alphas, self.betas)

        # greedily select an action based on this sample
        return np.argmax(posterior_sample)

    def _update_params(self, arm_id, rwd, context):
        """
        Compute the parameters of the Beta posterior, P(payoff prob | rwd),
        for arm `arm_id`.
        """
        # conjugate update: alpha += r, beta += 1 - r
        self.alphas[arm_id] += rwd
        self.betas[arm_id] += 1 - rwd
        self.ev_estimates[arm_id] = self._map_estimate(arm_id, rwd)

    def _map_estimate(self, arm_id, rwd):
        """Compute the current MAP estimate for an arm's payoff probability"""
        A, B = self.alphas, self.betas
        if A[arm_id] > 1 and B[arm_id] > 1:
            # interior mode of the Beta(A, B) density
            map_payoff_prob = (A[arm_id] - 1) / (A[arm_id] + B[arm_id] - 2)
        elif A[arm_id] < 1 and B[arm_id] < 1:
            map_payoff_prob = rwd  # 0 or 1 equally likely, make a guess
        elif A[arm_id] <= 1 and B[arm_id] > 1:
            map_payoff_prob = 0
        elif A[arm_id] > 1 and B[arm_id] <= 1:
            map_payoff_prob = 1
        else:
            map_payoff_prob = 0.5
        return map_payoff_prob

    def _reset_params(self):
        """
        Reset any model-specific parameters. This gets called within the
        public `self.reset()` method.
        """
        self.alphas, self.betas = [], []
        self.ev_estimates = {}
The provided code snippet includes necessary dependencies for implementing the `plot_comparison` function. Write a Python function `def plot_comparison()` to solve the following problem:
Use the BanditTrainer to compare several policies on the same bandit problem
Here is the function:
def plot_comparison():
    """
    Use the BanditTrainer to compare several policies on the same bandit
    problem
    """
    np.random.seed(1234)

    ep_length = 1
    K = 10
    n_duplicates = 5
    n_episodes = 5000

    # a random K-armed Bernoulli bandit environment
    cmab = random_bernoulli_mab(n_arms=K)

    policies = [
        EpsilonGreedy(epsilon=0.05, ev_prior=0.5),
        UCB1(C=1, ev_prior=0.5),
        ThompsonSamplingBetaBinomial(alpha=1, beta=1),
    ]

    # NOTE(review): arguments are passed positionally — confirm that
    # `compare`'s positional parameters are (policies, bandit, n_trials,
    # n_duplicates, ...) so each value lands where intended
    BanditTrainer().compare(
        policies, cmab, ep_length, n_episodes, n_duplicates,
    )
18,167 | import warnings
from itertools import product
from collections import defaultdict
import numpy as np
from numpy_ml.utils.testing import DependencyWarning
from numpy_ml.rl_models.tiles.tiles3 import tiles, IHT
class IHT:
    "Structure to handle collisions"

    def __init__(self, sizeval):
        # maximum number of distinct indices before hashing collisions begin
        self.size = sizeval
        self.overfullCount = 0
        self.dictionary = {}

    def __str__(self):
        "Prepares a string for printing whenever this object is printed"
        parts = [
            "Collision table:",
            " size:",
            str(self.size),
            " overfullCount:",
            str(self.overfullCount),
            " dictionary:",
            str(len(self.dictionary)),
            " items",
        ]
        return "".join(parts)

    def count(self):
        return len(self.dictionary)

    def fullp(self):
        return len(self.dictionary) >= self.size

    def getindex(self, obj, readonly=False):
        table = self.dictionary
        if obj in table:
            return table[obj]
        if readonly:
            return None
        if self.count() >= self.size:
            # table exhausted: fall back to plain (colliding) hashing
            if self.overfullCount == 0:
                print("IHT full, starting to allow collisions")
            self.overfullCount += 1
            return basehash(obj) % self.size
        # assign the next free index to this coordinate tuple
        table[obj] = self.count()
        return table[obj]
def tiles(ihtORsize, numtilings, floats, ints=[], readonly=False):
    """returns num-tilings tile indices corresponding to the floats and ints"""
    # quantize each float onto a grid `numtilings` times finer
    scaled = [floor(f * numtilings) for f in floats]
    indices = []
    for tiling in range(numtilings):
        coords = [tiling]
        offset = tiling
        for q in scaled:
            coords.append((q + offset) // numtilings)
            # successive dimensions are displaced by 2 * tiling
            offset += tiling * 2
        coords.extend(ints)
        indices.append(hashcoords(coords, ihtORsize, readonly))
    return indices
The provided code snippet includes necessary dependencies for implementing the `tile_state_space` function. Write a Python function `def tile_state_space( env, env_stats, n_tilings, obs_max=None, obs_min=None, state_action=False, grid_size=(4, 4), )` to solve the following problem:
Return a function to encode the continous observations generated by `env` in terms of a collection of `n_tilings` overlapping tilings (each with dimension `grid_size`) of the state space. Arguments --------- env : ``gym.wrappers.time_limit.TimeLimit`` instance An openAI environment. n_tilings : int The number of overlapping tilings to use. Should be a power of 2. This determines the dimension of the discretized tile-encoded state vector. obs_max : float or np.ndarray The value to treat as the max value of the observation space when calculating the grid widths. If None, use ``env.observation_space.high``. Default is None. obs_min : float or np.ndarray The value to treat as the min value of the observation space when calculating the grid widths. If None, use ``env.observation_space.low``. Default is None. state_action : bool Whether to use tile coding to encode state-action values (True) or just state values (False). Default is False. grid_size : list of length 2 A list of ints representing the coarseness of the tilings. E.g., a `grid_size` of [4, 4] would mean each tiling consisted of a 4x4 tile grid. Default is [4, 4]. Returns ------- encode_obs_as_tile : function A function which takes as input continous observation vector and returns a set of the indices of the active tiles in the tile coded observation space. n_states : int An integer reflecting the total number of unique states possible under this tile coding regimen.
Here is the function:
def tile_state_space(
    env,
    env_stats,
    n_tilings,
    obs_max=None,
    obs_min=None,
    state_action=False,
    grid_size=(4, 4),
):
    """
    Return a function to encode the continuous observations generated by `env`
    in terms of a collection of `n_tilings` overlapping tilings (each with
    dimension `grid_size`) of the state space.

    Arguments
    ---------
    env : ``gym.wrappers.time_limit.TimeLimit`` instance
        An openAI environment.
    n_tilings : int
        The number of overlapping tilings to use. Should be a power of 2. This
        determines the dimension of the discretized tile-encoded state vector.
    obs_max : float or np.ndarray
        The value to treat as the max value of the observation space when
        calculating the grid widths. If None, use
        ``env.observation_space.high``. Default is None.
    obs_min : float or np.ndarray
        The value to treat as the min value of the observation space when
        calculating the grid widths. If None, use
        ``env.observation_space.low``. Default is None.
    state_action : bool
        Whether to use tile coding to encode state-action values (True) or just
        state values (False). Default is False.
    grid_size : list of length 2
        A list of ints representing the coarseness of the tilings. E.g., a
        `grid_size` of [4, 4] would mean each tiling consisted of a 4x4 tile
        grid. Default is [4, 4].

    Returns
    -------
    encode_obs_as_tile : function
        A function which takes as input a continuous observation vector and
        returns a set of the indices of the active tiles in the tile coded
        observation space.
    n_states : int
        An integer reflecting the total number of unique states possible under
        this tile coding regimen.
    """
    # nan_to_num guards against +/-inf bounds in unbounded observation spaces
    obs_max = np.nan_to_num(env.observation_space.high) if obs_max is None else obs_max
    obs_min = np.nan_to_num(env.observation_space.low) if obs_min is None else obs_min

    if state_action:
        # append the action dimensions so (state, action) pairs are tiled jointly
        if env_stats["tuple_action"]:
            # NOTE(review): `env.action_spaces` (plural) — gym envs normally
            # expose `env.action_space`; confirm this attribute exists
            n = [space.n - 1.0 for space in env.action_spaces.spaces]
        else:
            n = [env.action_space.n]
        obs_max = np.concatenate([obs_max, n])
        obs_min = np.concatenate([obs_min, np.zeros_like(n)])

    obs_range = obs_max - obs_min
    scale = 1.0 / obs_range

    # scale (state-)observation vector
    scale_obs = lambda obs: obs * scale  # noqa: E731

    n_tiles = np.prod(grid_size) * n_tilings
    n_states = np.prod([n_tiles - i for i in range(n_tilings)])
    # index hash table for Sutton's tiles3 tile coder; 16384 caps the number
    # of distinct tile indices before collisions are allowed
    iht = IHT(16384)

    def encode_obs_as_tile(obs):
        # returns the active tile index for each of the `n_tilings` tilings
        obs = scale_obs(obs)
        return tuple(tiles(iht, n_tilings, obs))

    return encode_obs_as_tile, n_states
18,168 | import warnings
from itertools import product
from collections import defaultdict
import numpy as np
from numpy_ml.utils.testing import DependencyWarning
from numpy_ml.rl_models.tiles.tiles3 import tiles, IHT
try:
import gym
except ModuleNotFoundError:
fstr = (
"Agents in `numpy_ml.rl_models` use the OpenAI gym for training. "
"To install the gym environments, run `pip install gym`. For more"
" information, see https://github.com/openai/gym."
)
warnings.warn(fstr, DependencyWarning)
The provided code snippet includes necessary dependencies for implementing the `get_gym_environs` function. Write a Python function `def get_gym_environs()` to solve the following problem:
List all valid OpenAI ``gym`` environment ids
Here is the function:
def get_gym_environs():
    """List all valid OpenAI ``gym`` environment ids"""
    ids = []
    for spec in gym.envs.registry.all():
        ids.append(spec.id)
    return ids
18,169 | import warnings
from itertools import product
from collections import defaultdict
import numpy as np
from numpy_ml.utils.testing import DependencyWarning
from numpy_ml.rl_models.tiles.tiles3 import tiles, IHT
NO_PD = False
try:
import gym
except ModuleNotFoundError:
fstr = (
"Agents in `numpy_ml.rl_models` use the OpenAI gym for training. "
"To install the gym environments, run `pip install gym`. For more"
" information, see https://github.com/openai/gym."
)
warnings.warn(fstr, DependencyWarning)
def env_stats(env):
    """
    Compute statistics for the current environment.

    Parameters
    ----------
    env : ``gym.wrappers`` or ``gym.envs`` instance
        The environment to evaluate.

    Returns
    -------
    env_info : dict
        A dictionary containing information about the action and observation
        spaces of `env`.
    """
    # classify the action / observation spaces (multi-dim? tuple? continuous?)
    md_action, md_obs, tuple_action, tuple_obs = is_multidimensional(env)
    cont_action, cont_obs = is_continuous(env, tuple_action, tuple_obs)

    n_actions_per_dim, action_ids, action_dim = action_stats(
        env, md_action, cont_action,
    )
    n_obs_per_dim, obs_ids, obs_dim = obs_stats(env, md_obs, cont_obs)

    env_info = {
        "id": env.spec.id,
        # older gym versions lack `spec.seed`; fall back to None
        "seed": env.spec.seed if "seed" in dir(env.spec) else None,
        # fix: was `bool(~env.spec.nondeterministic)`, which is always True for
        # Python bools (~True == -2, ~False == -1, both truthy); `not` negates
        # correctly for both Python and numpy booleans
        "deterministic": not env.spec.nondeterministic,
        "tuple_actions": tuple_action,
        "tuple_observations": tuple_obs,
        "multidim_actions": md_action,
        "multidim_observations": md_obs,
        "continuous_actions": cont_action,
        "continuous_observations": cont_obs,
        "n_actions_per_dim": n_actions_per_dim,
        "action_dim": action_dim,
        "n_obs_per_dim": n_obs_per_dim,
        "obs_dim": obs_dim,
        "action_ids": action_ids,
        "obs_ids": obs_ids,
    }
    return env_info
The provided code snippet includes necessary dependencies for implementing the `get_gym_stats` function. Write a Python function `def get_gym_stats()` to solve the following problem:
Return a pandas DataFrame of the environment IDs.
Here is the function:
def get_gym_stats():
    """Return a pandas DataFrame of the environment IDs."""
    df = []
    # build one stats record per registered gym environment
    for e in gym.envs.registry.all():
        print(e.id)
        df.append(env_stats(gym.make(e.id)))
    # column order for the returned DataFrame
    cols = [
        "id",
        "continuous_actions",
        "continuous_observations",
        "action_dim",
        # "action_ids",
        "deterministic",
        "multidim_actions",
        "multidim_observations",
        "n_actions_per_dim",
        "n_obs_per_dim",
        "obs_dim",
        # "obs_ids",
        "seed",
        "tuple_actions",
        "tuple_observations",
    ]
    # NOTE(review): `pd` must be importable whenever NO_PD is False —
    # confirm the module imports pandas under that flag
    return df if NO_PD else pd.DataFrame(df)[cols]
18,170 | from math import floor
from itertools import zip_longest
def hashcoords(coordinates, m, readonly=False):
    """
    Map a coordinate list to a tile index using the "memory" `m`.

    Parameters
    ----------
    coordinates : list
        The tile coordinates to hash.
    m : IHT instance, int, or None
        If an `IHT`, index via its collision table; if an int, hash modulo
        `m`; if None, return the coordinates unchanged.
    readonly : bool
        Forwarded to ``IHT.getindex`` so lookups don't mutate the table.
        Default is False.
    """
    # fix: was `m == None` (PEP 8 E711); check None first so the other
    # branches never see it, and use isinstance over `type(...) ==` checks
    if m is None:
        return coordinates
    if isinstance(m, IHT):
        return m.getindex(tuple(coordinates), readonly)
    if isinstance(m, int):
        return basehash(tuple(coordinates)) % m
    # any other memory type: fall through (returns None, as before)
The provided code snippet includes necessary dependencies for implementing the `tileswrap` function. Write a Python function `def tileswrap(ihtORsize, numtilings, floats, wrapwidths, ints=[], readonly=False)` to solve the following problem:
returns num-tilings tile indices corresponding to the floats and ints, wrapping some floats
Here is the function:
def tileswrap(ihtORsize, numtilings, floats, wrapwidths, ints=[], readonly=False):
    """returns num-tilings tile indices corresponding to the floats and ints,
    wrapping some floats"""
    # quantize each float onto a grid `numtilings` times finer
    scaled = [floor(f * numtilings) for f in floats]
    indices = []
    for tiling in range(numtilings):
        coords = [tiling]
        offset = tiling
        # dimensions beyond len(wrapwidths) get width=None (no wrapping)
        for q, width in zip_longest(scaled, wrapwidths):
            c = (q + offset % numtilings) // numtilings
            coords.append(c % width if width else c)
            # successive dimensions are displaced by 2 * tiling
            offset += tiling * 2
        coords.extend(ints)
        indices.append(hashcoords(coords, ihtORsize, readonly))
    return indices
18,171 | import warnings
import os.path as op
from collections import defaultdict
import numpy as np
from numpy_ml.utils.testing import DependencyWarning
The provided code snippet includes necessary dependencies for implementing the `get_scriptdir` function. Write a Python function `def get_scriptdir()` to solve the following problem:
Return the directory containing the `trainer.py` script
Here is the function:
def get_scriptdir():
    """Return the directory containing the `trainer.py` script"""
    script_path = op.realpath(__file__)
    return op.dirname(script_path)
18,172 | import warnings
import os.path as op
from collections import defaultdict
import numpy as np
from numpy_ml.utils.testing import DependencyWarning
The provided code snippet includes necessary dependencies for implementing the `mse` function. Write a Python function `def mse(bandit, policy)` to solve the following problem:
Computes the mean squared error between a policy's estimates of the expected arm payouts and the true expected payouts.
Here is the function:
def mse(bandit, policy):
    """
    Computes the mean squared error between a policy's estimates of the
    expected arm payouts and the true expected payouts.
    """
    # policies with no EV estimates can't be scored
    if not hasattr(policy, "ev_estimates") or len(policy.ev_estimates) == 0:
        return np.nan

    true_evs = bandit.arm_evs
    # sort (arm_id, estimate) pairs by arm id so they align with arm_evs
    sorted_ests = sorted(policy.ev_estimates.items(), key=lambda kv: kv[0])
    sq_errs = [(est - ev) ** 2 for (_, est), ev in zip(sorted_ests, true_evs)]
    return np.mean(sq_errs)
18,173 | import warnings
import os.path as op
from collections import defaultdict
import numpy as np
from numpy_ml.utils.testing import DependencyWarning
The provided code snippet includes necessary dependencies for implementing the `smooth` function. Write a Python function `def smooth(prev, cur, weight)` to solve the following problem:
r""" Compute a simple weighted average of the previous and current value. Notes ----- The smoothed value at timestep `t`, :math:`\tilde{X}_t` is calculated as .. math:: \tilde{X}_t = \epsilon \tilde{X}_{t-1} + (1 - \epsilon) X_t where :math:`X_t` is the value at timestep `t`, :math:`\tilde{X}_{t-1}` is the value of the smoothed signal at timestep `t-1`, and :math:`\epsilon` is the smoothing weight. Parameters ---------- prev : float or :py:class:`ndarray <numpy.ndarray>` of shape `(N,)` The value of the smoothed signal at the immediately preceding timestep. cur : float or :py:class:`ndarray <numpy.ndarray>` of shape `(N,)` The value of the signal at the current timestep weight : float or :py:class:`ndarray <numpy.ndarray>` of shape `(N,)` The smoothing weight. Values closer to 0 result in less smoothing, values closer to 1 produce more aggressive smoothing. If weight is an array, each dimension will be interpreted as a separate smoothing weight the corresponding dimension in `cur`. Returns ------- smoothed : float or :py:class:`ndarray <numpy.ndarray>` of shape `(N,)` The smoothed signal
Here is the function:
def smooth(prev, cur, weight):
    r"""
    Compute a simple weighted average of the previous and current value.

    The smoothed value at timestep `t` is

    .. math::
        \tilde{X}_t = \epsilon \tilde{X}_{t-1} + (1 - \epsilon) X_t

    where :math:`X_t` is the raw value at timestep `t`,
    :math:`\tilde{X}_{t-1}` is the smoothed value at timestep `t-1`, and
    :math:`\epsilon` is the smoothing weight.

    Parameters
    ----------
    prev : float or :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        The smoothed signal at the immediately preceding timestep.
    cur : float or :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        The raw signal at the current timestep.
    weight : float or :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        The smoothing weight in [0, 1]. Values near 0 smooth less; values
        near 1 smooth more aggressively. An array applies a separate weight
        per dimension of `cur`.

    Returns
    -------
    smoothed : float or :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        The smoothed signal
    """
    return (1 - weight) * cur + weight * prev
18,174 | import numpy as np
from scipy.special import digamma, polygamma, gammaln
The provided code snippet includes necessary dependencies for implementing the `dg` function. Write a Python function `def dg(gamma, d, t)` to solve the following problem:
E[log X_t] where X_t ~ Dir
Here is the function:
def dg(gamma, d, t):
    """
    Expected value of log X_t under X ~ Dirichlet(gamma[d]),
    i.e. E[log X_t] = digamma(gamma[d, t]) - digamma(sum_t gamma[d, t]).
    """
    row = gamma[d, :]
    return digamma(row[t]) - digamma(row.sum())
18,175 | import numpy as np
from numpy.lib.stride_tricks import as_strided
from ..utils.windows import WindowInitializer
def nn_interpolate_2D(X, x, y):
    """
    Estimate the pixel values at coordinates (x, y) in `X` via nearest
    neighbor interpolation.

    Notes
    -----
    Assumes the current entries in `X` reflect equally-spaced samples from a
    2D integer grid.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(in_rows, in_cols, in_channels)`
        An input image sampled along a grid of `in_rows` by `in_cols`.
    x : list of length `k`
        The x-coordinates of the samples to generate.
    y : list of length `k`
        The y-coordinates of the samples to generate.

    Returns
    -------
    samples : :py:class:`ndarray <numpy.ndarray>` of shape `(k, in_channels)`
        The nearest-neighbor sample for each (x, y) coordinate.
    """
    n_rows, n_cols = X.shape[0], X.shape[1]
    # round each query coordinate to its nearest grid point, then clamp to
    # the valid index range
    col_ix = np.clip(np.around(x), 0, n_cols - 1).astype(int)
    row_ix = np.clip(np.around(y), 0, n_rows - 1).astype(int)
    return X[row_ix, col_ix, :]
def bilinear_interpolate(X, x, y):
"""
Estimates of the pixel values at the coordinates (x, y) in `X` via bilinear
interpolation.
Notes
-----
Assumes the current entries in X reflect equally-spaced
samples from a 2D integer grid.
Modified from https://bit.ly/2NMb1Dr
Parameters
----------
X : :py:class:`ndarray <numpy.ndarray>` of shape `(in_rows, in_cols, in_channels)`
An input image sampled along a grid of `in_rows` by `in_cols`.
x : list of length `k`
A list of x-coordinates for the samples we wish to generate
y : list of length `k`
A list of y-coordinates for the samples we wish to generate
Returns
-------
samples : list of length `(k, in_channels)`
The samples for each (x,y) coordinate computed via bilinear
interpolation
"""
x0 = np.floor(x).astype(int)
y0 = np.floor(y).astype(int)
x1 = x0 + 1
y1 = y0 + 1
x0 = np.clip(x0, 0, X.shape[1] - 1)
y0 = np.clip(y0, 0, X.shape[0] - 1)
x1 = np.clip(x1, 0, X.shape[1] - 1)
y1 = np.clip(y1, 0, X.shape[0] - 1)
Ia = X[y0, x0, :].T
Ib = X[y1, x0, :].T
Ic = X[y0, x1, :].T
Id = X[y1, x1, :].T
wa = (x1 - x) * (y1 - y)
wb = (x1 - x) * (y - y0)
wc = (x - x0) * (y1 - y)
wd = (x - x0) * (y - y0)
return (Ia * wa).T + (Ib * wb).T + (Ic * wc).T + (Id * wd).T
The provided code snippet includes necessary dependencies for implementing the `batch_resample` function. Write a Python function `def batch_resample(X, new_dim, mode="bilinear")` to solve the following problem:
Resample each image (or similar grid-based 2D signal) in a batch to `new_dim` using the specified resampling strategy. Parameters ---------- X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_channels)` An input image volume new_dim : 2-tuple of `(out_rows, out_cols)` The dimension to resample each image to mode : {'bilinear', 'neighbor'} The resampling strategy to employ. Default is 'bilinear'. Returns ------- resampled : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, in_channels)` The resampled image volume.
Here is the function:
def batch_resample(X, new_dim, mode="bilinear"):
    """
    Resample each image (or similar grid-based 2D signal) in a batch to
    `new_dim` using the specified resampling strategy.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_channels)`
        An input image volume
    new_dim : 2-tuple of `(out_rows, out_cols)`
        The dimension to resample each image to
    mode : {'bilinear', 'neighbor'}
        The resampling strategy to employ. Default is 'bilinear'.

    Returns
    -------
    resampled : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, in_channels)`
        The resampled image volume.

    Raises
    ------
    NotImplementedError
        If `mode` is not one of 'bilinear' or 'neighbor'.
    """
    if mode == "bilinear":
        interpolate = bilinear_interpolate
    elif mode == "neighbor":
        interpolate = nn_interpolate_2D
    else:
        raise NotImplementedError("Unrecognized resampling mode: {}".format(mode))

    out_rows, out_cols = new_dim
    n_ex, in_rows, in_cols, n_in = X.shape

    # compute coordinates to resample
    x = np.tile(np.linspace(0, in_cols - 2, out_cols), out_rows)
    y = np.repeat(np.linspace(0, in_rows - 2, out_rows), out_cols)

    # resample each image
    resampled = []
    for i in range(n_ex):
        r = interpolate(X[i, ...], x, y)
        r = r.reshape(out_rows, out_cols, n_in)
        resampled.append(r)

    # BUGFIX: stack along a new leading batch axis. The previous version used
    # np.dstack, which concatenates along the *channel* axis and yields shape
    # (out_rows, out_cols, n_ex * in_channels), contradicting the documented
    # return shape of (n_ex, out_rows, out_cols, in_channels).
    return np.stack(resampled, axis=0)
18,176 | import numpy as np
from numpy.lib.stride_tricks import as_strided
from ..utils.windows import WindowInitializer
The provided code snippet includes necessary dependencies for implementing the `nn_interpolate_1D` function. Write a Python function `def nn_interpolate_1D(X, t)` to solve the following problem:
Estimates of the signal values at `X[t]` using a nearest neighbor interpolation strategy. Parameters ---------- X : :py:class:`ndarray <numpy.ndarray>` of shape `(in_length, in_channels)` An input image sampled along an integer `in_length` t : list of length `k` A list of coordinates for the samples we wish to generate Returns ------- samples : :py:class:`ndarray <numpy.ndarray>` of shape `(k, in_channels)` The samples for each (x,y) coordinate computed via nearest neighbor interpolation
Here is the function:
def nn_interpolate_1D(X, t):
    """
    Estimate the signal values at `X[t]` via nearest neighbor interpolation.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(in_length, in_channels)`
        An input image sampled along an integer `in_length`
    t : list of length `k`
        The coordinates of the samples to generate.

    Returns
    -------
    samples : :py:class:`ndarray <numpy.ndarray>` of shape `(k, in_channels)`
        The nearest-neighbor sample for each coordinate in `t`.
    """
    # snap each coordinate to its nearest integer index, clamped in-range
    idx = np.around(t)
    idx = np.clip(idx, 0, X.shape[0] - 1).astype(int)
    return X[idx, :]
18,177 | import numpy as np
from numpy.lib.stride_tricks import as_strided
from ..utils.windows import WindowInitializer
The provided code snippet includes necessary dependencies for implementing the `__DCT2` function. Write a Python function `def __DCT2(frame)` to solve the following problem:
Currently broken
Here is the function:
def __DCT2(frame):
    """Currently broken"""
    N = len(frame)  # window length
    k = np.arange(N, dtype=float)
    # Outer product of the index vector with itself: F[i, j] = i * j.
    F = k.reshape(1, -1) * k.reshape(-1, 1)
    # Column-wise ratio F / k, with entries where F == 0 forced to 0 to
    # avoid the division by k[0] == 0.
    K = np.divide(F, k, out=np.zeros_like(F), where=F != 0)
    # NOTE(review): `K * np.pi / 2 * N` evaluates left-to-right as
    # (K * pi * N) / 2, whereas the DCT-II phase term is conventionally
    # pi * k * (2n + 1) / (2 * N) -- presumably this operator-precedence slip
    # is why the function is marked broken. Confirm the intended formula
    # before attempting a fix.
    FC = np.cos(F * np.pi / N + K * np.pi / 2 * N)
    return 2 * (FC @ frame)
18,178 | import numpy as np
from numpy.lib.stride_tricks import as_strided
from ..utils.windows import WindowInitializer
The provided code snippet includes necessary dependencies for implementing the `autocorrelate1D` function. Write a Python function `def autocorrelate1D(x)` to solve the following problem:
Autocorrelate a 1D signal `x` with itself. Notes ----- The `k` th term in the 1 dimensional autocorrelation is .. math:: a_k = \sum_n x_{n + k} x_n NB. This is a naive :math:`O(N^2)` implementation. For a faster :math:`O(N \log N)` approach using the FFT, see [1]. References ---------- .. [1] https://en.wikipedia.org/wiki/Autocorrelation#Efficient%computation Parameters ---------- x : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)` A 1D signal consisting of N samples Returns ------- auto : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)` The autocorrelation of `x` with itself
Here is the function:
def autocorrelate1D(x):
    """
    Autocorrelate a 1D signal `x` with itself.

    Notes
    -----
    The `k` th term in the 1 dimensional autocorrelation is

    .. math::
        a_k = \sum_n x_{n + k} x_n

    NB. This is still an :math:`O(N^2)` algorithm, but each lag is computed
    as a single vectorized dot product rather than a Python-level inner loop.
    For a faster :math:`O(N \log N)` approach using the FFT, see [1].

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation

    Parameters
    ----------
    x : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        A 1D signal consisting of N samples

    Returns
    -------
    auto : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        The autocorrelation of `x` with itself
    """
    x = np.asarray(x)
    N = x.shape[0]
    auto = np.zeros(N)
    for k in range(N):
        # \sum_n x_{n + k} x_n as the dot product of two shifted views
        auto[k] = x[k:] @ x[: N - k]
    return auto
18,179 | import numpy as np
from numpy.lib.stride_tricks import as_strided
from ..utils.windows import WindowInitializer
def DCT(frame, orthonormal=True):
    """
    A naive :math:`O(N^2)` implementation of the type-II discrete cosine
    transform (DCT-II).

    Notes
    -----
    For a signal :math:`\mathbf{x} = [x_1, \ldots, x_N]` of `N` samples, the
    `k` th DCT coefficient is

    .. math::
        c_k = 2 \sum_{n=0}^{N-1} x_n \cos(\pi k (2 n + 1) / (2 N))

    for :math:`k = 0, \ldots, N-1`. The DCT is closely related to the DFT
    but uses only cosine basis functions; a signal's DCT representation
    tends to concentrate its energy in fewer coefficients than the DFT,
    which makes it a popular choice for signal compression.

    Parameters
    ----------
    frame : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        A signal frame consisting of N samples
    orthonormal : bool
        Scale to ensure the coefficient vector is orthonormal. Default is True.

    Returns
    -------
    dct : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        The discrete cosine transform of the samples in `frame`.
    """
    N = len(frame)
    coeffs = np.zeros_like(frame)
    for k in range(N):
        # project `frame` onto the k-th cosine basis function
        for n in range(N):
            coeffs[k] += frame[n] * np.cos(np.pi * k * (2 * n + 1) / (2 * N))
        if orthonormal:
            # k = 0 gets a smaller scale so that the basis is orthonormal
            coeffs[k] *= 2 * np.sqrt(1 / (4 * N) if k == 0 else 1 / (2 * N))
        else:
            coeffs[k] *= 2
    return coeffs
def cepstral_lifter(mfccs, D):
    """
    A simple sinusoidal filter applied in the Mel-frequency domain.

    Notes
    -----
    Cepstral lifting smooths the spectral envelope by damping the magnitude
    of the higher MFCC coefficients while leaving the lower ones unchanged.
    The filter function is:

    .. math::
        \\text{lifter}( x_n ) = x_n \left(1 + \\frac{D \sin(\pi n / D)}{2}\\right)

    Parameters
    ----------
    mfccs : :py:class:`ndarray <numpy.ndarray>` of shape `(G, C)`
        Matrix of Mel cepstral coefficients. Rows correspond to frames,
        columns to cepstral coefficients
    D : int in :math:`[0, +\infty]`
        The filter coefficient. 0 corresponds to no filtering, larger values
        correspond to greater amounts of smoothing

    Returns
    -------
    out : :py:class:`ndarray <numpy.ndarray>` of shape `(G, C)`
        The lifter'd MFCC coefficients
    """
    if D == 0:
        return mfccs
    # one sinusoidal weight per cepstral coefficient, broadcast over frames
    coeff_ix = np.arange(mfccs.shape[1])
    lift = 1 + (D / 2) * np.sin(np.pi * coeff_ix / D)
    return mfccs * lift
def mel_spectrogram(
    x,
    window_duration=0.025,
    stride_duration=0.01,
    mean_normalize=True,
    window="hamming",
    n_filters=20,
    center=True,
    alpha=0.95,
    fs=44000,
):
    """
    Apply the Mel-filterbank to the power spectrum for a signal `x`.
    Notes
    -----
    The Mel spectrogram is the projection of the power spectrum of the framed
    and windowed signal onto the basis set provided by the Mel filterbank.
    Parameters
    ----------
    x : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        A 1D signal consisting of N samples
    window_duration : float
        The duration of each frame / window (in seconds). Default is 0.025.
    stride_duration : float
        The duration of the hop between consecutive windows (in seconds).
        Default is 0.01.
    mean_normalize : bool
        Whether to subtract the coefficient means from the final filter values
        to improve the signal-to-noise ratio. Default is True.
    window : {'hamming', 'hann', 'blackman_harris'}
        The windowing function to apply to the signal before FFT. Default is
        'hamming'.
    n_filters : int
        The number of mel filters to include in the filterbank. Default is 20.
    center : bool
        Whether the `k` th frame of the signal should *begin* at index ``x[k *
        stride_len]`` (center = False) or be *centered* at ``x[k * stride_len]``
        (center = True). Default is True.
    alpha : float in [0, 1)
        The coefficient for the preemphasis filter. A value of 0 corresponds to
        no filtering. Default is 0.95.
    fs : int
        The sample rate/frequency for the signal. Default is 44000.
    Returns
    -------
    filter_energies : :py:class:`ndarray <numpy.ndarray>` of shape `(G, n_filters)`
        The (possibly mean_normalized) power for each filter in the Mel
        filterbank (i.e., the Mel spectrogram). Rows correspond to frames,
        columns to filters
    energy_per_frame : :py:class:`ndarray <numpy.ndarray>` of shape `(G,)`
        The total energy in each frame of the signal
    """
    # smallest representable positive float; used below to avoid exact zeros
    # (which would break a downstream log)
    eps = np.finfo(float).eps
    window_fn = WindowInitializer()(window)
    # durations (seconds) -> lengths (samples)
    stride = round(stride_duration * fs)
    frame_width = round(window_duration * fs)
    N = frame_width
    # add a preemphasis filter to the raw signal
    x = preemphasis(x, alpha)
    # convert signal to overlapping frames and apply a window function
    # (reflect-pad by half a frame so frames are *centered* on x[k * stride])
    x = np.pad(x, N // 2, "reflect") if center else x
    frames = to_frames(x, frame_width, stride, fs)
    # NB: `window` the array shadows `window` the string argument from here on
    window = np.tile(window_fn(frame_width), (frames.shape[0], 1))
    frames = frames * window
    # compute the power spectrum
    power_spec = power_spectrum(frames)
    energy_per_frame = np.sum(power_spec, axis=1)
    energy_per_frame[energy_per_frame == 0] = eps
    # compute the power at each filter in the Mel filterbank
    fbank = mel_filterbank(N, n_filters=n_filters, fs=fs)
    filter_energies = power_spec @ fbank.T
    # `-= 0` is a deliberate no-op when mean_normalize is False
    filter_energies -= np.mean(filter_energies, axis=0) if mean_normalize else 0
    filter_energies[filter_energies == 0] = eps
    return filter_energies, energy_per_frame
The provided code snippet includes necessary dependencies for implementing the `mfcc` function. Write a Python function `def mfcc( x, fs=44000, n_mfccs=13, alpha=0.95, center=True, n_filters=20, window="hann", normalize=True, lifter_coef=22, stride_duration=0.01, window_duration=0.025, replace_intercept=True, )` to solve the following problem:
Compute the Mel-frequency cepstral coefficients (MFCC) for a signal. Notes ----- Computing MFCC features proceeds in the following stages: 1. Convert the signal into overlapping frames and apply a window fn 2. Compute the power spectrum at each frame 3. Apply the mel filterbank to the power spectra to get mel filterbank powers 4. Take the logarithm of the mel filterbank powers at each frame 5. Take the discrete cosine transform (DCT) of the log filterbank energies and retain only the first k coefficients to further reduce the dimensionality MFCCs were developed in the context of HMM-GMM automatic speech recognition (ASR) systems and can be used to provide a somewhat speaker/pitch invariant representation of phonemes. Parameters ---------- x : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)` A 1D signal consisting of N samples fs : int The sample rate/frequency for the signal. Default is 44000. n_mfccs : int The number of cepstral coefficients to return (including the intercept coefficient). Default is 13. alpha : float in [0, 1) The preemphasis coefficient. A value of 0 corresponds to no filtering. Default is 0.95. center : bool Whether to the kth frame of the signal should *begin* at index ``x[k * stride_len]`` (center = False) or be *centered* at ``x[k * stride_len]`` (center = True). Default is True. n_filters : int The number of filters to include in the Mel filterbank. Default is 20. normalize : bool Whether to mean-normalize the MFCC values. Default is True. lifter_coef : int in :math:[0, + \infty]` The cepstral filter coefficient. 0 corresponds to no filtering, larger values correspond to greater amounts of smoothing. Default is 22. window : {'hamming', 'hann', 'blackman_harris'} The windowing function to apply to the signal before taking the DFT. Default is 'hann'. stride_duration : float The duration of the hop between consecutive windows (in seconds). Default is 0.01. window_duration : float The duration of each frame / window (in seconds). 
Default is 0.025. replace_intercept : bool Replace the first MFCC coefficient (the intercept term) with the log of the total frame energy instead. Default is True. Returns ------- mfccs : :py:class:`ndarray <numpy.ndarray>` of shape `(G, C)` Matrix of Mel-frequency cepstral coefficients. Rows correspond to frames, columns to cepstral coefficients
Here is the function:
def mfcc(
    x,
    fs=44000,
    n_mfccs=13,
    alpha=0.95,
    center=True,
    n_filters=20,
    window="hann",
    normalize=True,
    lifter_coef=22,
    stride_duration=0.01,
    window_duration=0.025,
    replace_intercept=True,
):
    """
    Compute the Mel-frequency cepstral coefficients (MFCC) for a signal.

    Notes
    -----
    The MFCC pipeline proceeds as follows:

        1. Frame the signal into overlapping windows and apply a window fn
        2. Compute the power spectrum of each frame
        3. Project the power spectra onto the Mel filterbank
        4. Take the logarithm of the Mel filterbank powers
        5. Apply a discrete cosine transform (DCT) to the log filterbank
           energies and keep only the first `n_mfccs` coefficients

    MFCCs were developed in the context of HMM-GMM automatic speech
    recognition (ASR) systems and provide a somewhat speaker/pitch invariant
    representation of phonemes.

    Parameters
    ----------
    x : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
        A 1D signal consisting of N samples
    fs : int
        The sample rate/frequency for the signal. Default is 44000.
    n_mfccs : int
        The number of cepstral coefficients to return (including the
        intercept coefficient). Default is 13.
    alpha : float in [0, 1)
        The preemphasis coefficient. A value of 0 corresponds to no
        filtering. Default is 0.95.
    center : bool
        Whether the kth frame of the signal should *begin* at index ``x[k *
        stride_len]`` (center = False) or be *centered* at ``x[k *
        stride_len]`` (center = True). Default is True.
    n_filters : int
        The number of filters to include in the Mel filterbank. Default is 20.
    normalize : bool
        Whether to mean-normalize the MFCC values. Default is True.
    lifter_coef : int in :math:`[0, +\infty]`
        The cepstral filter coefficient. 0 corresponds to no filtering,
        larger values correspond to greater amounts of smoothing. Default is
        22.
    window : {'hamming', 'hann', 'blackman_harris'}
        The windowing function to apply to the signal before taking the DFT.
        Default is 'hann'.
    stride_duration : float
        The duration of the hop between consecutive windows (in seconds).
        Default is 0.01.
    window_duration : float
        The duration of each frame / window (in seconds). Default is 0.025.
    replace_intercept : bool
        Replace the first MFCC coefficient (the intercept term) with the log
        of the total frame energy instead. Default is True.

    Returns
    -------
    mfccs : :py:class:`ndarray <numpy.ndarray>` of shape `(G, C)`
        Matrix of Mel-frequency cepstral coefficients. Rows correspond to
        frames, columns to cepstral coefficients
    """
    # project the power spectrum of the framed + windowed signal onto the
    # Mel scale
    filter_energies, frame_energies = mel_spectrogram(
        x=x,
        fs=fs,
        alpha=alpha,
        center=center,
        window=window,
        n_filters=n_filters,
        mean_normalize=False,
        window_duration=window_duration,
        stride_duration=stride_duration,
    )

    log_energies = 10 * np.log10(filter_energies)

    # the DCT concentrates most of the envelope information in its leading
    # coefficients, so truncating past `n_mfccs` gives a compact
    # representation
    mfccs = np.array([DCT(frame) for frame in log_energies])[:, :n_mfccs]
    mfccs = cepstral_lifter(mfccs, D=lifter_coef)

    if normalize:
        mfccs -= np.mean(mfccs, axis=0)

    if replace_intercept:
        # the 0th MFCC coefficient doesn't tell us anything about the
        # spectrum; substitute the log frame energy for something more
        # informative
        mfccs[:, 0] = np.log(frame_energies)
    return mfccs
window_duration : float The duration of each frame / window (in seconds). Default is 0.025. replace_intercept : bool Replace the first MFCC coefficient (the intercept term) with the log of the total frame energy instead. Default is True. Returns ------- mfccs : :py:class:`ndarray <numpy.ndarray>` of shape `(G, C)` Matrix of Mel-frequency cepstral coefficients. Rows correspond to frames, columns to cepstral coefficients |
18,180 | import re
import heapq
import os.path as op
from collections import Counter, OrderedDict, defaultdict
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `ngrams` function. Write a Python function `def ngrams(sequence, N)` to solve the following problem:
Return all `N`-grams of the elements in `sequence`
Here is the function:
def ngrams(sequence, N):
    """
    Return all `N`-grams of the elements in `sequence`.

    Parameters
    ----------
    sequence : sliceable sequence
        The sequence to extract N-grams from.
    N : int
        The gram size. Must be at least 1.

    Returns
    -------
    grams : list of tuple
        Every window of `N` consecutive elements in `sequence`. Empty when
        `sequence` has fewer than `N` elements.

    Raises
    ------
    ValueError
        If `N` is less than 1.
    """
    # `assert` is stripped under `python -O`; validate input explicitly
    if N < 1:
        raise ValueError("N must be >= 1, got {}".format(N))
    return list(zip(*[sequence[i:] for i in range(N)]))
18,181 | import re
import heapq
import os.path as op
from collections import Counter, OrderedDict, defaultdict
import numpy as np
def remove_stop_words(words):
    """Return `words` with all stop-words filtered out (case-insensitive)."""
    keep = lambda w: w.lower() not in _STOP_WORDS
    return list(filter(keep, words))
def strip_punctuation(line):
    """Return `line` with punctuation removed and surrounding whitespace trimmed."""
    stripped = line.translate(_PUNC_TABLE)
    return stripped.strip()
The provided code snippet includes necessary dependencies for implementing the `tokenize_whitespace` function. Write a Python function `def tokenize_whitespace( line, lowercase=True, filter_stopwords=True, filter_punctuation=True, **kwargs, )` to solve the following problem:
Split a string at any whitespace characters, optionally removing punctuation and stop-words in the process.
Here is the function:
def tokenize_whitespace(
    line, lowercase=True, filter_stopwords=True, filter_punctuation=True, **kwargs,
):
    """
    Split a string at any whitespace characters, optionally removing
    punctuation and stop-words in the process.

    Parameters
    ----------
    line : str
        The string to tokenize.
    lowercase : bool
        Whether to lowercase `line` before splitting. Default is True.
    filter_stopwords : bool
        Whether to remove stop-words from the result. Default is True.
    filter_punctuation : bool
        Whether to strip punctuation from each token. Default is True.

    Returns
    -------
    words : list of str
        The whitespace-delimited tokens.
    """
    line = line.lower() if lowercase else line
    words = line.split()
    # BUGFIX: the previous version assigned the punctuation-stripped tokens
    # to `line` and then returned the unstripped `words`, so
    # `filter_punctuation` silently had no effect
    words = [strip_punctuation(w) for w in words] if filter_punctuation else words
    return remove_stop_words(words) if filter_stopwords else words
18,182 | import re
import heapq
import os.path as op
from collections import Counter, OrderedDict, defaultdict
import numpy as np
def tokenize_words(
    line, lowercase=True, filter_stopwords=True, filter_punctuation=True, **kwargs,
):
    """
    Split a string into individual words, optionally removing punctuation and
    stop-words in the process.
    """
    # the punctuation-free regex drops punctuation during matching
    if filter_punctuation:
        regex = _WORD_REGEX
    else:
        regex = _WORD_REGEX_W_PUNC
    text = line.lower() if lowercase else line
    words = regex.findall(text)
    if filter_stopwords:
        return remove_stop_words(words)
    return words
The provided code snippet includes necessary dependencies for implementing the `tokenize_words_bytes` function. Write a Python function `def tokenize_words_bytes( line, lowercase=True, filter_stopwords=True, filter_punctuation=True, encoding="utf-8", **kwargs, )` to solve the following problem:
Split a string into individual words, optionally removing punctuation and stop-words in the process. Translate each word into a list of bytes.
Here is the function:
def tokenize_words_bytes(
    line,
    lowercase=True,
    filter_stopwords=True,
    filter_punctuation=True,
    encoding="utf-8",
    **kwargs,
):
    """
    Split a string into individual words, optionally removing punctuation and
    stop-words in the process. Translate each word into a list of bytes.
    """
    tokens = tokenize_words(
        line,
        lowercase=lowercase,
        filter_stopwords=filter_stopwords,
        filter_punctuation=filter_punctuation,
        **kwargs,
    )
    # encode each word and render its bytes as space-separated decimal ints
    return [" ".join(str(b) for b in w.encode(encoding)) for w in tokens]
18,183 | import re
import heapq
import os.path as op
from collections import Counter, OrderedDict, defaultdict
import numpy as np
_PUNC_BYTE_REGEX = re.compile(
r"(33|34|35|36|37|38|39|40|41|42|43|44|45|"
r"46|47|58|59|60|61|62|63|64|91|92|93|94|"
r"95|96|123|124|125|126)",
)
The provided code snippet includes necessary dependencies for implementing the `tokenize_bytes_raw` function. Write a Python function `def tokenize_bytes_raw(line, encoding="utf-8", splitter=None, **kwargs)` to solve the following problem:
Convert the characters in `line` to a collection of bytes. Each byte is represented in decimal as an integer between 0 and 255. Parameters ---------- line : str The string to tokenize. encoding : str The encoding scheme for the characters in `line`. Default is `'utf-8'`. splitter : {'punctuation', None} If `'punctuation'`, split the string at any punctuation character before encoding into bytes. If None, do not split `line` at all. Default is None. Returns ------- bytes : list A list of the byte-encoded characters in `line`. Each item in the list is a string of space-separated integers between 0 and 255 representing the bytes encoding the characters in `line`.
Here is the function:
def tokenize_bytes_raw(line, encoding="utf-8", splitter=None, **kwargs):
    """
    Convert the characters in `line` to a collection of bytes. Each byte is
    represented in decimal as an integer between 0 and 255.

    Parameters
    ----------
    line : str
        The string to tokenize.
    encoding : str
        The encoding scheme for the characters in `line`. Default is `'utf-8'`.
    splitter : {'punctuation', None}
        If `'punctuation'`, split the string at any punctuation character
        before encoding into bytes. If None, do not split `line` at all.
        Default is None.

    Returns
    -------
    bytes : list
        A list of the byte-encoded characters in `line`. Each item in the
        list is a string of space-separated integers between 0 and 255
        representing the bytes encoding the characters in `line`.
    """
    encoded = line.encode(encoding)
    byte_str = [" ".join(str(b) for b in encoded)]
    if splitter == "punctuation":
        # wrap each punctuation byte in '-' delimiters, then split on them
        byte_str = _PUNC_BYTE_REGEX.sub(r"-\1-", byte_str[0]).split("-")
    return byte_str
18,184 | import re
import heapq
import os.path as op
from collections import Counter, OrderedDict, defaultdict
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bytes_to_chars` function. Write a Python function `def bytes_to_chars(byte_list, encoding="utf-8")` to solve the following problem:
Decode bytes (represented as an integer between 0 and 255) to characters in the specified encoding.
Here is the function:
def bytes_to_chars(byte_list, encoding="utf-8"):
    """
    Decode bytes (represented as an integer between 0 and 255) to characters
    in the specified encoding.

    Parameters
    ----------
    byte_list : list of int
        Integer byte values, each in [0, 255].
    encoding : str
        The text encoding to decode with. Default is 'utf-8'.

    Returns
    -------
    chars : str
        The decoded string.
    """
    # `bytes` accepts an iterable of ints in [0, 255] directly, so the
    # previous round-trip through a hex string and `bytearray.fromhex` is
    # unnecessary
    return bytes(byte_list).decode(encoding)
18,185 | import re
import heapq
import os.path as op
from collections import Counter, OrderedDict, defaultdict
import numpy as np
def strip_punctuation(line):
"""Remove punctuation from a string"""
return line.translate(_PUNC_TABLE).strip()
The provided code snippet includes necessary dependencies for implementing the `tokenize_chars` function. Write a Python function `def tokenize_chars(line, lowercase=True, filter_punctuation=True, **kwargs)` to solve the following problem:
Split a string into individual characters, optionally removing punctuation and stop-words in the process.
Here is the function:
def tokenize_chars(line, lowercase=True, filter_punctuation=True, **kwargs):
    """
    Split a string into a list of its individual characters, optionally
    lowercasing and removing punctuation first.
    """
    if lowercase:
        line = line.lower()
    if filter_punctuation:
        line = strip_punctuation(line)
    # collapse runs of 2+ spaces to a single space and trim before splitting
    collapsed = re.sub(" {2,}", " ", line).strip()
    return list(collapsed)
18,186 | import json
import hashlib
import warnings
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `minibatch` function. Write a Python function `def minibatch(X, batchsize=256, shuffle=True)` to solve the following problem:
Compute the minibatch indices for a training dataset. Parameters ---------- X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, \*)` The dataset to divide into minibatches. Assumes the first dimension represents the number of training examples. batchsize : int The desired size of each minibatch. Note, however, that if ``X.shape[0] % batchsize > 0`` then the final batch will contain fewer than batchsize entries. Default is 256. shuffle : bool Whether to shuffle the entries in the dataset before dividing into minibatches. Default is True. Returns ------- mb_generator : generator A generator which yields the indices into `X` for each batch. n_batches: int The number of batches.
Here is the function:
def minibatch(X, batchsize=256, shuffle=True):
    """
    Compute the minibatch indices for a training dataset.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, \*)`
        The dataset to divide into minibatches. The first dimension is
        assumed to index the training examples.
    batchsize : int
        The desired size of each minibatch. If ``X.shape[0] % batchsize > 0``
        the final batch contains fewer than `batchsize` entries. Default is
        256.
    shuffle : bool
        Whether to shuffle the example indices before dividing them into
        minibatches. Default is True.

    Returns
    -------
    mb_generator : generator
        A generator yielding the index arrays into `X` for each batch.
    n_batches : int
        The number of batches.
    """
    n_examples = X.shape[0]
    indices = np.arange(n_examples)
    n_batches = int(np.ceil(n_examples / batchsize))
    if shuffle:
        np.random.shuffle(indices)

    def gen():
        # slicing past the end simply yields a short final batch
        for start in range(0, n_examples, batchsize):
            yield indices[start : start + batchsize]

    return gen(), n_batches
18,187 | from abc import ABC, abstractmethod
import numpy as np
class Dropout(WrapperBase):
    def __init__(self, wrapped_layer, p):
        """
        A dropout regularization wrapper.

        Notes
        -----
        During training, a dropout layer zeroes each element of the layer input
        with probability `p` and scales the activation by `1 / (1 - p)` (to reflect
        the fact that on average only `(1 - p) * N` units are active on any
        training pass). At test time, does not adjust elements of the input at
        all (ie., simply computes the identity function).

        Parameters
        ----------
        wrapped_layer : :doc:`Layer <numpy_ml.neural_nets.layers>` instance
            The layer to apply dropout to.
        p : float in [0, 1)
            The dropout probability during training
        """
        super().__init__(wrapped_layer)
        self.p = p  # probability of zeroing an input element at train time
        self._init_wrapper_params()
        self._init_params()

    def _init_wrapper_params(self):
        # one boolean mask is appended per forward pass (see `forward`)
        self._wrapper_derived_variables = {"dropout_mask": []}
        self._wrapper_hyperparameters = {"wrapper": "Dropout", "p": self.p}

    def forward(self, X, retain_derived=True):
        """
        Compute the layer output with dropout for a single minibatch.

        Parameters
        ----------
        X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, n_in)`
            Layer input, representing the `n_in`-dimensional features for a
            minibatch of `n_ex` examples.
        retain_derived : bool
            Whether to retain the variables calculated during the forward pass
            for use later during backprop. If False, this suggests the layer
            will not be expected to backprop through wrt. this input. Default
            is True.

        Returns
        -------
        Y : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, n_out)`
            Layer output for each of the `n_ex` examples.
        """
        # at test time (not trainable): identity mask, no rescaling
        scaler, mask = 1.0, np.ones(X.shape).astype(bool)
        if self.trainable:
            # "inverted" dropout: scale by 1 / (1 - p) at train time so the
            # expected activation magnitude matches test time
            scaler = 1.0 / (1.0 - self.p)
            mask = np.random.rand(*X.shape) >= self.p
            X = mask * X
        if retain_derived:
            self._wrapper_derived_variables["dropout_mask"].append(mask)
        return scaler * self._base_layer.forward(X, retain_derived)

    def backward(self, dLdy, retain_grads=True):
        """
        Backprop from the base layer's outputs to inputs.

        Parameters
        ----------
        dLdy : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, n_out)` or list of arrays
            The gradient(s) of the loss wrt. the layer output(s).
        retain_grads : bool
            Whether to include the intermediate parameter gradients computed
            during the backward pass in the final parameter update. Default is
            True.

        Returns
        -------
        dLdX : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, n_in)` or list of arrays
            The gradient of the loss wrt. the layer input(s) `X`.
        """  # noqa: E501
        assert self.trainable, "Layer is frozen"
        # NOTE(review): `dLdy` is scaled in place, mutating the caller's
        # array -- confirm callers do not reuse `dLdy` after this call.
        # The dropout mask is not reapplied here; presumably the masked
        # forward input makes this unnecessary -- verify against
        # `_base_layer.backward`.
        dLdy *= 1.0 / (1.0 - self.p)
        return self._base_layer.backward(dLdy, retain_grads)
The provided code snippet includes necessary dependencies for implementing the `init_wrappers` function. Write a Python function `def init_wrappers(layer, wrappers_list)` to solve the following problem:
Initialize the layer wrappers in `wrapper_list` and return a wrapped `layer` object. Parameters ---------- layer : :doc:`Layer <numpy_ml.neural_nets.layers>` instance The base layer object to apply the wrappers to. wrappers : list of dicts A list of parameter dictionaries for the wrapper objects. The wrappers are initialized and applied to the layer sequentially. Returns ------- wrapped_layer : :class:`WrapperBase` instance The wrapped layer object
Here is the function:
def init_wrappers(layer, wrappers_list):
    """
    Apply each wrapper described in `wrappers_list` to `layer`, in order,
    and return the resulting wrapped layer.

    Parameters
    ----------
    layer : :doc:`Layer <numpy_ml.neural_nets.layers>` instance
        The base layer object to wrap.
    wrappers_list : list of dicts
        A list of parameter dictionaries for the wrapper objects. The
        wrappers are initialized and applied to the layer sequentially.

    Returns
    -------
    wrapped_layer : :class:`WrapperBase` instance
        The layer wrapped by every entry of `wrappers_list`.
    """
    for params in wrappers_list:
        kind = params["wrapper"]
        if kind != "Dropout":
            raise NotImplementedError("{}".format(kind))
        # p=1 here is presumably a placeholder overwritten by
        # `_set_wrapper_params` -- confirm against WrapperBase
        layer = Dropout(layer, 1)._set_wrapper_params(params)
    return layer
18,188 | from copy import deepcopy
from abc import ABC, abstractmethod
import numpy as np
from math import erf
The provided code snippet includes necessary dependencies for implementing the `gaussian_cdf` function. Write a Python function `def gaussian_cdf(x, mean, var)` to solve the following problem:
Compute the probability that a random draw from a 1D Gaussian with mean `mean` and variance `var` is less than or equal to `x`.
Here is the function:
def gaussian_cdf(x, mean, var):
    """
    Return the probability that a draw from a 1D Gaussian with mean `mean`
    and variance `var` is less than or equal to `x`.
    """
    # eps guards against division by zero when var == 0
    eps = np.finfo(float).eps
    z = (x - mean) / np.sqrt(var + eps)
    # standard-normal CDF via the error function
    return (1 + erf(z / np.sqrt(2))) / 2
18,189 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `minibatch` function. Write a Python function `def minibatch(X, batchsize=256, shuffle=True)` to solve the following problem:
Compute the minibatch indices for a training dataset. Parameters ---------- X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, \*)` The dataset to divide into minibatches. Assumes the first dimension represents the number of training examples. batchsize : int The desired size of each minibatch. Note, however, that if ``X.shape[0] % batchsize > 0`` then the final batch will contain fewer than batchsize entries. Default is 256. shuffle : bool Whether to shuffle the entries in the dataset before dividing into minibatches. Default is True. Returns ------- mb_generator : generator A generator which yields the indices into X for each batch n_batches: int The number of batches
Here is the function:
def minibatch(X, batchsize=256, shuffle=True):
    """
    Compute the minibatch indices for a training dataset.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, \*)`
        The dataset to divide into minibatches; the first dimension indexes
        the training examples.
    batchsize : int
        The desired size of each minibatch. If ``X.shape[0] % batchsize > 0``
        the final batch is smaller than `batchsize`. Default is 256.
    shuffle : bool
        Whether to shuffle the example indices before batching. Default is
        True.

    Returns
    -------
    mb_generator : generator
        A generator which yields the indices into X for each batch
    n_batches : int
        The number of batches
    """
    N = X.shape[0]
    order = np.arange(N)
    n_batches = int(np.ceil(N / batchsize))
    if shuffle:
        np.random.shuffle(order)

    def batches():
        for b in range(n_batches):
            yield order[b * batchsize : (b + 1) * batchsize]

    return batches(), n_batches
18,190 | import numpy as np
def pad1D(X, pad, kernel_width=None, stride=None, dilation=0):
    """
    Zero-pad a 3D input volume `X` along its second (sequence) dimension.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, l_in, in_ch)`
        Input volume; padding is added along `l_in`.
    pad : tuple, int, or {'same', 'causal'}
        The padding amount. An int pads both sides equally; a 2-tuple gives
        the (left, right) column counts. 'same' pads so a 1D convolution
        with kernel `kernel_width` and stride `stride` preserves the input
        length; 'causal' additionally guarantees ``output[t]`` does not
        depend on ``input[t + 1:]``.
    kernel_width : int
        The convolution kernel width. Only used when pad is 'same' or
        'causal'. Default is None.
    stride : int
        The convolution stride. Only used when pad is 'same' or 'causal'.
        Default is None.
    dilation : int
        The convolution kernel dilation. Only used when pad is 'same' or
        'causal'. Default is 0.

    Returns
    -------
    X_pad : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, padded_seq, in_channels)`
        The zero-padded input volume.
    p : 2-tuple
        The number of zero columns added to the (left, right) of the
        sequences in `X`.
    """
    p = (pad, pad) if isinstance(pad, int) else pad
    if isinstance(p, tuple):
        left, right = p
        X_pad = np.pad(
            X,
            pad_width=((0, 0), (left, right), (0, 0)),
            mode="constant",
            constant_values=0,
        )
    # resolve 'same' / 'causal' into concrete widths, then pad recursively
    if p in ["same", "causal"] and kernel_width and stride:
        causal = p == "causal"
        p = calc_pad_dims_1D(
            X.shape, X.shape[1], kernel_width, stride, causal=causal, dilation=dilation
        )
        X_pad, p = pad1D(X, p)
    return X_pad, p
def pad2D(X, pad, kernel_shape=None, stride=None, dilation=0):
    """
    Zero-pad a 4D input volume `X` along its second and third dimensions.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume; padding is applied to `in_rows` and `in_cols`.
    pad : tuple, int, or 'same'
        The padding amount. An int pads all four edges equally; a 2-tuple
        gives the (row, col) padding added *on both sides*; a 4-tuple gives
        the (top, bottom, left, right) amounts. 'same' pads so a 2D
        convolution with kernel `kernel_shape` and stride `stride` preserves
        the input dimensions.
    kernel_shape : 2-tuple
        The 2D convolution kernel shape. Only used when pad is 'same'.
        Default is None.
    stride : int
        The convolution stride. Only used when pad is 'same'. Default is
        None.
    dilation : int
        The convolution kernel dilation. Only used when pad is 'same'.
        Default is 0.

    Returns
    -------
    X_pad : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, padded_in_rows, padded_in_cols, in_channels)`
        The zero-padded output volume.
    p : 4-tuple
        The number of zero rows/cols added to the (top, bottom, left, right)
        of `X`.
    """
    p = pad
    if isinstance(p, int):
        p = (p,) * 4
    if isinstance(p, tuple):
        if len(p) == 2:
            # symmetric (row, col) padding -> explicit 4-tuple
            p = (p[0], p[0], p[1], p[1])
        X_pad = np.pad(
            X,
            pad_width=((0, 0), p[:2], p[2:], (0, 0)),
            mode="constant",
            constant_values=0,
        )
    # resolve 'same' into concrete widths, then pad recursively
    if p == "same" and kernel_shape and stride is not None:
        p = calc_pad_dims_2D(
            X.shape, X.shape[1:3], kernel_shape, stride, dilation=dilation
        )
        X_pad, p = pad2D(X, p)
    return X_pad, p
The provided code snippet includes necessary dependencies for implementing the `calc_conv_out_dims` function. Write a Python function `def calc_conv_out_dims(X_shape, W_shape, stride=1, pad=0, dilation=0)` to solve the following problem:
Compute the dimension of the output volume for the specified convolution. Parameters ---------- X_shape : 3-tuple or 4-tuple The dimensions of the input volume to the convolution. If 3-tuple, entries are expected to be (`n_ex`, `in_length`, `in_ch`). If 4-tuple, entries are expected to be (`n_ex`, `in_rows`, `in_cols`, `in_ch`). weight_shape : 3-tuple or 4-tuple The dimensions of the weight volume for the convolution. If 3-tuple, entries are expected to be (`f_len`, `in_ch`, `out_ch`). If 4-tuple, entries are expected to be (`fr`, `fc`, `in_ch`, `out_ch`). pad : tuple, int, or {'same', 'causal'} The padding amount. If 'same', add padding to ensure that the output length of a 1D convolution with a kernel of `kernel_shape` and stride `stride` is the same as the input length. If 'causal' compute padding such that the output both has the same length as the input AND ``output[t]`` does not depend on ``input[t + 1:]``. If 2-tuple, specifies the number of padding columns to add on each side of the sequence. Default is 0. stride : int The stride for the convolution kernel. Default is 1. dilation : int The dilation of the convolution kernel. Default is 0. Returns ------- out_dims : 3-tuple or 4-tuple The dimensions of the output volume. If 3-tuple, entries are (`n_ex`, `out_length`, `out_ch`). If 4-tuple, entries are (`n_ex`, `out_rows`, `out_cols`, `out_ch`).
Here is the function:
def calc_conv_out_dims(X_shape, W_shape, stride=1, pad=0, dilation=0):
    """
    Compute the dimension of the output volume for the specified convolution.

    Parameters
    ----------
    X_shape : 3-tuple or 4-tuple
        The dimensions of the input volume to the convolution. If 3-tuple,
        entries are expected to be (`n_ex`, `in_length`, `in_ch`). If 4-tuple,
        entries are expected to be (`n_ex`, `in_rows`, `in_cols`, `in_ch`).
    W_shape : 3-tuple or 4-tuple
        The dimensions of the weight volume for the convolution. If 3-tuple,
        entries are expected to be (`f_len`, `in_ch`, `out_ch`). If 4-tuple,
        entries are expected to be (`fr`, `fc`, `in_ch`, `out_ch`).
    pad : tuple, int, or {'same', 'causal'}
        The padding amount. If 'same', add padding to ensure that the output
        length of a 1D convolution with a kernel of `kernel_shape` and stride
        `stride` is the same as the input length. If 'causal' compute padding
        such that the output both has the same length as the input AND
        ``output[t]`` does not depend on ``input[t + 1:]``. If 2-tuple, specifies the
        number of padding columns to add on each side of the sequence. Default
        is 0.
    stride : int
        The stride for the convolution kernel. Default is 1.
    dilation : int
        The dilation of the convolution kernel. Default is 0.

    Returns
    -------
    out_dims : 3-tuple or 4-tuple
        The dimensions of the output volume. If 3-tuple, entries are (`n_ex`,
        `out_length`, `out_ch`). If 4-tuple, entries are (`n_ex`, `out_rows`,
        `out_cols`, `out_ch`).
    """
    # a zero volume of the input shape lets pad1D/pad2D resolve int or
    # string padding specs into concrete per-side amounts
    dummy = np.zeros(X_shape)
    s, p, d = stride, pad, dilation
    if len(X_shape) == 3:
        _, p = pad1D(dummy, p)
        pw1, pw2 = p
        fw, in_ch, out_ch = W_shape
        n_ex, in_length, in_ch = X_shape
        # adjust effective filter size to account for dilation
        _fw = fw * (d + 1) - d
        out_length = (in_length + pw1 + pw2 - _fw) // s + 1
        out_dims = (n_ex, out_length, out_ch)
    elif len(X_shape) == 4:
        _, p = pad2D(dummy, p)
        pr1, pr2, pc1, pc2 = p
        fr, fc, in_ch, out_ch = W_shape
        n_ex, in_rows, in_cols, in_ch = X_shape
        # adjust effective filter size to account for dilation
        _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d
        out_rows = (in_rows + pr1 + pr2 - _fr) // s + 1
        out_cols = (in_cols + pc1 + pc2 - _fc) // s + 1
        out_dims = (n_ex, out_rows, out_cols, out_ch)
    else:
        raise ValueError("Unrecognized number of input dims: {}".format(len(X_shape)))
    return out_dims
18,191 | import numpy as np
def _im2col_indices(X_shape, fr, fc, p, s, d=0):
    """
    Helper function that computes indices into X in prep for columnization in
    :func:`im2col`.

    Code extended from Andrej Karpathy's `im2col.py`

    Parameters
    ----------
    X_shape : 4-tuple of `(n_ex, n_in, in_rows, in_cols)`
        Shape of the (channel-first) input volume.
    fr, fc : int
        Kernel rows and columns.
    p : 4-tuple of `(top, bottom, left, right)`
        Per-side zero-padding amounts.
    s : int
        Convolution stride.
    d : int
        Kernel dilation. Default is 0.

    Returns
    -------
    k, i, j : :py:class:`ndarray <numpy.ndarray>`
        Broadcastable (channel, row, col) index arrays addressing every
        kernel element at every output location of the padded input.
    """
    pr1, pr2, pc1, pc2 = p
    n_ex, n_in, in_rows, in_cols = X_shape
    # adjust effective filter size to account for dilation
    _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d
    out_rows = (in_rows + pr1 + pr2 - _fr) // s + 1
    out_cols = (in_cols + pc1 + pc2 - _fc) // s + 1
    if any([out_rows <= 0, out_cols <= 0]):
        raise ValueError(
            "Dimension mismatch during convolution: "
            "out_rows = {}, out_cols = {}".format(out_rows, out_cols)
        )
    # i1/j1 : row/col templates
    # i0/j0 : n. copies (len) and offsets (values) for row/col templates
    i0 = np.repeat(np.arange(fr), fc)
    i0 = np.tile(i0, n_in) * (d + 1)
    i1 = s * np.repeat(np.arange(out_rows), out_cols)
    j0 = np.tile(np.arange(fc), fr * n_in) * (d + 1)
    j1 = s * np.tile(np.arange(out_cols), out_rows)
    # broadcast templates against offsets:
    # i.shape = (fr * fc * n_in, out_height * out_width)
    # j.shape = (fr * fc * n_in, out_height * out_width)
    # k.shape = (fr * fc * n_in, 1)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    k = np.repeat(np.arange(n_in), fr * fc).reshape(-1, 1)
    return k, i, j
The provided code snippet includes necessary dependencies for implementing the `col2im` function. Write a Python function `def col2im(X_col, X_shape, W_shape, pad, stride, dilation=0)` to solve the following problem:
Take columns of a 2D matrix and rearrange them into the blocks/windows of a 4D image volume. Notes ----- A NumPy reimagining of MATLAB's ``col2im`` 'sliding' function. Code extended from Andrej Karpathy's ``im2col.py``. Parameters ---------- X_col : :py:class:`ndarray <numpy.ndarray>` of shape `(Q, Z)` The columnized version of `X` (assumed to include padding) X_shape : 4-tuple containing `(n_ex, in_rows, in_cols, in_ch)` The original dimensions of `X` (not including padding) W_shape: 4-tuple containing `(kernel_rows, kernel_cols, in_ch, out_ch)` The dimensions of the weights in the present convolutional layer pad : 4-tuple of `(left, right, up, down)` Number of zero-padding rows/cols to add to `X` stride : int The stride of each convolution kernel dilation : int Number of pixels inserted between kernel elements. Default is 0. Returns ------- img : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)` The reshaped `X_col` input matrix
Here is the function:
def col2im(X_col, X_shape, W_shape, pad, stride, dilation=0):
    """
    Take columns of a 2D matrix and rearrange them into the blocks/windows of
    a 4D image volume.

    Notes
    -----
    A NumPy reimagining of MATLAB's ``col2im`` 'sliding' function.

    Code extended from Andrej Karpathy's ``im2col.py``.

    Parameters
    ----------
    X_col : :py:class:`ndarray <numpy.ndarray>` of shape `(Q, Z)`
        The columnized version of `X` (assumed to include padding)
    X_shape : 4-tuple containing `(n_ex, in_rows, in_cols, in_ch)`
        The original dimensions of `X` (not including padding)
    W_shape: 4-tuple containing `(kernel_rows, kernel_cols, in_ch, out_ch)`
        The dimensions of the weights in the present convolutional layer
    pad : 4-tuple of `(left, right, up, down)`
        Number of zero-padding rows/cols to add to `X`
    stride : int
        The stride of each convolution kernel
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    img : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        The reshaped `X_col` input matrix
    """
    if not (isinstance(pad, tuple) and len(pad) == 4):
        raise TypeError("pad must be a 4-tuple, but got: {}".format(pad))

    s, d = stride, dilation
    pr1, pr2, pc1, pc2 = pad
    fr, fc, n_in, n_out = W_shape
    n_ex, in_rows, in_cols, n_in = X_shape

    # reconstruct into a channel-first, padded buffer (matches the index
    # layout produced by _im2col_indices)
    X_pad = np.zeros((n_ex, n_in, in_rows + pr1 + pr2, in_cols + pc1 + pc2))
    k, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, pad, s, d)

    X_col_reshaped = X_col.reshape(n_in * fr * fc, -1, n_ex)
    X_col_reshaped = X_col_reshaped.transpose(2, 0, 1)

    # scatter-add so pixels covered by overlapping windows accumulate the
    # contributions from every window that touches them
    np.add.at(X_pad, (slice(None), k, i, j), X_col_reshaped)

    # strip the padding; map 0 -> None because a `-0` slice bound would
    # select an empty axis instead of the full extent
    pr2 = None if pr2 == 0 else -pr2
    pc2 = None if pc2 == 0 else -pc2
    return X_pad[:, :, pr1:pr2, pc1:pc2]
18,192 | import numpy as np
def pad1D(X, pad, kernel_width=None, stride=None, dilation=0):
    """
    Zero-pad a 3D input volume `X` along the second dimension.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, l_in, in_ch)`
        Input volume. Padding is applied to `l_in`.
    pad : tuple, int, or {'same', 'causal'}
        The padding amount. An int pads both ends of the sequence equally; a
        2-tuple gives the number of columns added on each side. 'same' pads
        so a 1D convolution with a `kernel_width` kernel and stride `stride`
        preserves the input length; 'causal' additionally ensures
        ``output[t]`` does not depend on ``input[t + 1:]``.
    kernel_width : int
        The convolution kernel width. Only relevant if pad is 'same' or
        'causal'. Default is None.
    stride : int
        The convolution stride. Only relevant if pad is 'same' or 'causal'.
        Default is None.
    dilation : int
        The convolution kernel dilation. Only relevant if pad is 'same' or
        'causal'. Default is 0.

    Returns
    -------
    X_pad : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, padded_seq, in_channels)`
        The padded output volume
    p : 2-tuple
        The number of 0-padded columns added to the (left, right) of the
        sequences in `X`.
    """
    p = pad
    if isinstance(p, int):
        p = (p, p)
    if isinstance(p, tuple):
        X_pad = np.pad(
            X, pad_width=((0, 0), p, (0, 0)), mode="constant", constant_values=0
        )
    # a string spec is first converted to explicit widths, then re-padded
    if p in ["same", "causal"] and kernel_width and stride:
        dims = calc_pad_dims_1D(
            X.shape,
            X.shape[1],
            kernel_width,
            stride,
            causal=p == "causal",
            dilation=dilation,
        )
        X_pad, p = pad1D(X, dims)
    return X_pad, p
def conv2D(X, W, stride, pad, dilation=0):
    """
    A faster (but more memory intensive) implementation of the 2D "convolution"
    (technically, cross-correlation) of input `X` with a collection of kernels in
    `W`.

    Notes
    -----
    Relies on the :func:`im2col` function to perform the convolution as a single
    matrix multiplication.

    For a helpful diagram, see Pete Warden's 2015 blogpost [1].

    References
    ----------
    .. [1] Warden (2015). "Why GEMM is at the heart of deep learning,"
       https://petewarden.com/2015/04/20/why-gemm-is-at-the-heart-of-deep-learning/

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume (unpadded).
    W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_rows, kernel_cols, in_ch, out_ch)`
        A volume of convolution weights/kernels for a given layer.
    stride : int
        The stride of each convolution kernel.
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding to ensure that the output of
        a 2D convolution with a kernel of `kernel_shape` and stride `stride`
        produces an output volume of the same dimensions as the input. If
        2-tuple, specifies the number of padding rows and colums to add *on both
        sides* of the rows/columns in `X`. If 4-tuple, specifies the number of
        rows/columns to add to the top, bottom, left, and right of the input
        volume.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    Z : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, out_ch)`
        The convolution of `X` with `W`.
    """
    s, d = stride, dilation
    # resolve the padding spec into explicit (top, bottom, left, right) amounts
    _, p = pad2D(X, pad, W.shape[:2], s, dilation=dilation)

    pr1, pr2, pc1, pc2 = p
    fr, fc, in_ch, out_ch = W.shape
    n_ex, in_rows, in_cols, in_ch = X.shape

    # update effective filter shape based on dilation factor
    _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d

    # compute the dimensions of the convolution output
    out_rows = int((in_rows + pr1 + pr2 - _fr) / s + 1)
    out_cols = int((in_cols + pc1 + pc2 - _fc) / s + 1)

    # convert X and W into the appropriate 2D matrices and take their product,
    # so the whole convolution becomes a single GEMM
    X_col, _ = im2col(X, W.shape, p, s, d)
    W_col = W.transpose(3, 2, 0, 1).reshape(out_ch, -1)

    Z = (W_col @ X_col).reshape(out_ch, out_rows, out_cols, n_ex).transpose(3, 1, 2, 0)

    return Z
The provided code snippet includes necessary dependencies for implementing the `conv1D` function. Write a Python function `def conv1D(X, W, stride, pad, dilation=0)` to solve the following problem:
A faster (but more memory intensive) implementation of a 1D "convolution" (technically, cross-correlation) of input `X` with a collection of kernels in `W`. Notes ----- Relies on the :func:`im2col` function to perform the convolution as a single matrix multiplication. For a helpful diagram, see Pete Warden's 2015 blogpost [1]. References ---------- .. [1] Warden (2015). "Why GEMM is at the heart of deep learning," https://petewarden.com/2015/04/20/why-gemm-is-at-the-heart-of-deep-learning/ Parameters ---------- X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, l_in, in_ch)` Input volume (unpadded) W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_width, in_ch, out_ch)` A volume of convolution weights/kernels for a given layer stride : int The stride of each convolution kernel pad : tuple, int, or 'same' The padding amount. If 'same', add padding to ensure that the output of a 1D convolution with a kernel of `kernel_shape` and stride `stride` produces an output volume of the same dimensions as the input. If 2-tuple, specifies the number of padding colums to add *on both sides* of the columns in X. dilation : int Number of pixels inserted between kernel elements. Default is 0. Returns ------- Z : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, l_out, out_ch)` The convolution of X with W.
Here is the function:
def conv1D(X, W, stride, pad, dilation=0):
    """
    A fast (but memory intensive) 1D "convolution" (technically,
    cross-correlation) of input `X` with a collection of kernels in `W`,
    implemented by delegating to the 2D routine.

    Notes
    -----
    Adds a singleton row dimension to `X` and `W` so that :func:`conv2D`
    (and hence :func:`im2col`) can perform the operation as a single matrix
    multiplication.

    For a helpful diagram, see Pete Warden's 2015 blogpost [1].

    References
    ----------
    .. [1] Warden (2015). "Why GEMM is at the heart of deep learning,"
       https://petewarden.com/2015/04/20/why-gemm-is-at-the-heart-of-deep-learning/

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, l_in, in_ch)`
        Input volume (unpadded)
    W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_width, in_ch, out_ch)`
        A volume of convolution weights/kernels for a given layer
    stride : int
        The stride of each convolution kernel
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding to ensure that the output
        of a 1D convolution with a kernel of `kernel_shape` and stride
        `stride` produces an output volume of the same dimensions as the
        input. If 2-tuple, specifies the number of padding colums to add *on
        both sides* of the columns in X.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    Z : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, l_out, out_ch)`
        The convolution of X with W.
    """
    _, p = pad1D(X, pad, W.shape[0], stride, dilation=dilation)

    # promote inputs to 4D so the im2col/col2im machinery applies
    X2D = np.expand_dims(X, axis=1)
    W2D = np.expand_dims(W, axis=0)

    out4D = conv2D(X2D, W2D, stride, (0, 0, p[0], p[1]), dilation)

    # remove the dummy row dimension
    return np.squeeze(out4D, axis=1)
18,193 | import numpy as np
def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride, dilation=0):
    """
    Determine how much zero-padding is needed so that convolving an input of
    shape `X_shape` with a 2D kernel of shape `kernel_shape`, stride `stride`,
    and dilation `dilation` yields an output of spatial size `out_dim`.

    Parameters
    ----------
    X_shape : tuple of `(n_ex, in_rows, in_cols, in_ch)`
        Dimensions of the input volume. Padding is applied to `in_rows` and
        `in_cols`.
    out_dim : tuple of `(out_rows, out_cols)`
        The desired spatial dimensions of a single output example after
        applying the convolution.
    kernel_shape : 2-tuple
        The dimension of the 2D convolution kernel.
    stride : int
        The stride for the convolution kernel.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    padding_dims : 4-tuple
        Padding dims for `X`. Organized as (left, right, up, down).
    """
    for arg, arg_name in [
        (X_shape, "X_shape"),
        (out_dim, "out_dim"),
        (kernel_shape, "kernel_shape"),
    ]:
        if not isinstance(arg, tuple):
            raise ValueError("`{}` must be of type tuple".format(arg_name))
    if not isinstance(stride, int):
        raise ValueError("`stride` must be of type int")

    d = dilation
    fr, fc = kernel_shape
    out_rows, out_cols = out_dim
    n_ex, in_rows, in_cols, in_ch = X_shape

    # effective kernel extent once dilation zeros are inserted
    eff_fr = fr * (d + 1) - d
    eff_fc = fc * (d + 1) - d

    # symmetric padding estimate (truncated toward zero, as upstream does)
    pr = int((stride * (out_rows - 1) + eff_fr - in_rows) / 2)
    pc = int((stride * (out_cols - 1) + eff_fc - in_cols) / 2)

    # output size that the symmetric estimate actually produces
    chk_rows = int(1 + (in_rows + 2 * pr - eff_fr) / stride)
    chk_cols = int(1 + (in_cols + 2 * pc - eff_cols_extent(eff_fc)) / stride) if False else int(1 + (in_cols + 2 * pc - eff_fc) / stride)

    # when the symmetric estimate falls one short, put the extra padding
    # pixel on the bottom / right
    pr1, pr2 = pr, pr
    if chk_rows == out_rows - 1:
        pr2 += 1
    elif chk_rows != out_rows:
        raise AssertionError

    pc1, pc2 = pc, pc
    if chk_cols == out_cols - 1:
        pc2 += 1
    elif chk_cols != out_cols:
        raise AssertionError

    if min(pr1, pr2, pc1, pc2) < 0:
        raise ValueError(
            "Padding cannot be less than 0. Got: {}".format((pr1, pr2, pc1, pc2))
        )
    return (pr1, pr2, pc1, pc2)
def pad2D(X, pad, kernel_shape=None, stride=None, dilation=0):
    """
    Zero-pad a 4D input volume `X` along the second and third dimensions.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume. Padding is applied to `in_rows` and `in_cols`.
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding to ensure that the output of
        a 2D convolution with a kernel of `kernel_shape` and stride `stride`
        has the same dimensions as the input. If 2-tuple, specifies the number
        of padding rows and columns to add *on both sides* of the rows/columns
        in `X`. If 4-tuple, specifies the number of rows/columns to add to the
        top, bottom, left, and right of the input volume.
    kernel_shape : 2-tuple
        The dimension of the 2D convolution kernel. Only relevant if p='same'.
        Default is None.
    stride : int
        The stride for the convolution kernel. Only relevant if p='same'.
        Default is None.
    dilation : int
        The dilation of the convolution kernel. Only relevant if p='same'.
        Default is 0.

    Returns
    -------
    X_pad : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, padded_in_rows, padded_in_cols, in_channels)`
        The padded output volume.
    p : 4-tuple
        The number of 0-padded rows added to the (top, bottom, left, right) of
        `X`.
    """
    p = pad
    # BUGFIX: resolve 'same' padding *before* calling np.pad. In the previous
    # version np.pad executed first, so a string `pad` was used as a pad
    # width and raised before the 'same' branch could ever run.
    if p == "same" and kernel_shape and stride is not None:
        p = calc_pad_dims_2D(
            X.shape, X.shape[1:3], kernel_shape, stride, dilation=dilation
        )
    if isinstance(p, int):
        p = (p, p, p, p)
    if isinstance(p, tuple) and len(p) == 2:
        # (row_pad, col_pad) -> apply the same amount on both sides
        p = (p[0], p[0], p[1], p[1])
    X_pad = np.pad(
        X,
        pad_width=((0, 0), (p[0], p[1]), (p[2], p[3]), (0, 0)),
        mode="constant",
        constant_values=0,
    )
    return X_pad, p
def dilate(X, d):
    """
    Insert `d` rows/columns of zeros between each adjacent pair of rows and
    columns of the 4D volume `X`.

    Notes
    -----
    For a visual depiction of a dilated convolution, see Dumoulin & Visin
    (2016), "A guide to convolution arithmetic for deep learning."
    https://arxiv.org/pdf/1603.07285v1.pdf

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume.
    d : int
        The number of 0-rows to insert between each adjacent row + column in `X`.

    Returns
    -------
    Xd : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, out_ch)`
        The dilated array, where ``out_rows = in_rows + d * (in_rows - 1)``
        and ``out_cols = in_cols + d * (in_cols - 1)``.
    """
    _, rows, cols, _ = X.shape
    # each interior index (1..rows-1 / 1..cols-1) receives `d` zero
    # rows/columns inserted in front of it
    row_idx = np.repeat(np.arange(1, rows), d)
    col_idx = np.repeat(np.arange(1, cols), d)
    dilated = np.insert(X, row_idx, 0, axis=1)
    return np.insert(dilated, col_idx, 0, axis=2)
def conv2D(X, W, stride, pad, dilation=0):
    """
    Vectorized 2D "convolution" (technically, cross-correlation) of the input
    `X` with the kernel bank `W`.

    Notes
    -----
    The convolution is evaluated as a single matrix product by unrolling the
    input windows with :func:`im2col`. This trades memory for speed; see Pete
    Warden's 2015 blogpost for a helpful diagram:
    https://petewarden.com/2015/04/20/why-gemm-is-at-the-heart-of-deep-learning/

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume (unpadded).
    W : :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_rows, kernel_cols, in_ch, out_ch)`
        A volume of convolution weights/kernels for a given layer.
    stride : int
        The stride of each convolution kernel.
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding so the output has the same
        spatial dimensions as the input. If 2-tuple, the number of padding
        rows and columns added *on both sides* of the rows/columns in `X`. If
        4-tuple, the number of rows/columns added to the top, bottom, left,
        and right of the input volume.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    Z : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, out_ch)`
        The convolution of `X` with `W`.
    """
    s, d = stride, dilation
    _, p = pad2D(X, pad, W.shape[:2], s, dilation=d)
    pr1, pr2, pc1, pc2 = p

    fr, fc, in_ch, out_ch = W.shape
    n_ex, in_rows, in_cols, _ = X.shape

    # kernel extent once dilation zeros are taken into account
    eff_fr = fr * (d + 1) - d
    eff_fc = fc * (d + 1) - d

    # spatial dimensions of the output volume
    out_rows = int((in_rows + pr1 + pr2 - eff_fr) / s + 1)
    out_cols = int((in_cols + pc1 + pc2 - eff_fc) / s + 1)

    # unroll the input windows and the kernels, multiply as one GEMM, then
    # fold the product back into an image-shaped tensor
    X_col, _ = im2col(X, W.shape, p, s, d)
    W_col = W.transpose(3, 2, 0, 1).reshape(out_ch, -1)
    prod = (W_col @ X_col).reshape(out_ch, out_rows, out_cols, n_ex)
    return prod.transpose(3, 1, 2, 0)
The provided code snippet includes necessary dependencies for implementing the `deconv2D_naive` function. Write a Python function `def deconv2D_naive(X, W, stride, pad, dilation=0)` to solve the following problem:
Perform a "deconvolution" (more accurately, a transposed convolution) of an input volume `X` with a weight kernel `W`, incorporating stride, pad, and dilation. Notes ----- Rather than using the transpose of the convolution matrix, this approach uses a direct convolution with zero padding, which, while conceptually straightforward, is computationally inefficient. For further explanation, see [1]. References ---------- .. [1] Dumoulin & Visin (2016). "A guide to convolution arithmetic for deep learning." https://arxiv.org/pdf/1603.07285v1.pdf Parameters ---------- X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)` Input volume (not padded) W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_rows, kernel_cols, in_ch, out_ch)` A volume of convolution weights/kernels for a given layer stride : int The stride of each convolution kernel pad : tuple, int, or 'same' The padding amount. If 'same', add padding to ensure that the output of a 2D convolution with a kernel of `kernel_shape` and stride `stride` produces an output volume of the same dimensions as the input. If 2-tuple, specifies the number of padding rows and colums to add *on both sides* of the rows/columns in `X`. If 4-tuple, specifies the number of rows/columns to add to the top, bottom, left, and right of the input volume. dilation : int Number of pixels inserted between kernel elements. Default is 0. Returns ------- Y : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, n_out)` The decovolution of (padded) input volume `X` with `W` using stride `s` and dilation `d`.
Here is the function:
def deconv2D_naive(X, W, stride, pad, dilation=0):
    """
    Perform a "deconvolution" (more accurately, a transposed convolution) of an
    input volume `X` with a weight kernel `W`, incorporating stride, pad, and
    dilation.

    Notes
    -----
    Rather than using the transpose of the convolution matrix, this approach
    uses a direct convolution with zero padding, which, while conceptually
    straightforward, is computationally inefficient.

    For further explanation, see [1].

    References
    ----------
    .. [1] Dumoulin & Visin (2016). "A guide to convolution arithmetic for deep
       learning." https://arxiv.org/pdf/1603.07285v1.pdf

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume (not padded)
    W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_rows, kernel_cols, in_ch, out_ch)`
        A volume of convolution weights/kernels for a given layer
    stride : int
        The stride of each convolution kernel
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding to ensure that the output of
        a 2D convolution with a kernel of `kernel_shape` and stride `stride`
        produces an output volume of the same dimensions as the input. If
        2-tuple, specifies the number of padding rows and columns to add *on both
        sides* of the rows/columns in `X`. If 4-tuple, specifies the number of
        rows/columns to add to the top, bottom, left, and right of the input
        volume.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    Y : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, n_out)`
        The deconvolution of (padded) input volume `X` with `W` using stride `s` and
        dilation `d`.
    """
    # a strided transposed convolution is equivalent to a stride-1 transposed
    # convolution over an input dilated by (stride - 1)
    if stride > 1:
        X = dilate(X, stride - 1)
        stride = 1
    # pad the input
    X_pad, p = pad2D(X, pad, W.shape[:2], stride=stride, dilation=dilation)
    n_ex, in_rows, in_cols, n_in = X_pad.shape
    fr, fc, n_in, n_out = W.shape
    s, d = stride, dilation
    pr1, pr2, pc1, pc2 = p
    # update effective filter shape based on dilation factor
    _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d
    # compute deconvolution output dims (the inverse of the usual conv
    # output-size formula)
    out_rows = s * (in_rows - 1) - pr1 - pr2 + _fr
    out_cols = s * (in_cols - 1) - pc1 - pc2 + _fc
    out_dim = (out_rows, out_cols)
    # add additional padding to achieve the target output dim
    _p = calc_pad_dims_2D(X_pad.shape, out_dim, W.shape[:2], s, d)
    X_pad, pad = pad2D(X_pad, _p, W.shape[:2], stride=s, dilation=dilation)
    # perform the forward convolution using the flipped weight matrix (note
    # we set pad to 0, since we've already added padding)
    Z = conv2D(X_pad, np.rot90(W, 2), s, 0, d)
    # trim the user-requested padding back off; use None as the upper slice
    # bound when the pad is 0, since x[a:-0] would be an empty slice
    pr2 = None if pr2 == 0 else -pr2
    pc2 = None if pc2 == 0 else -pc2
    return Z[:, pr1:pr2, pc1:pc2, :]
18,194 | import numpy as np
def pad2D(X, pad, kernel_shape=None, stride=None, dilation=0):
    """
    Zero-pad a 4D input volume `X` along the second and third dimensions.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume. Padding is applied to `in_rows` and `in_cols`.
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding to ensure that the output of
        a 2D convolution with a kernel of `kernel_shape` and stride `stride`
        has the same dimensions as the input. If 2-tuple, specifies the number
        of padding rows and columns to add *on both sides* of the rows/columns
        in `X`. If 4-tuple, specifies the number of rows/columns to add to the
        top, bottom, left, and right of the input volume.
    kernel_shape : 2-tuple
        The dimension of the 2D convolution kernel. Only relevant if p='same'.
        Default is None.
    stride : int
        The stride for the convolution kernel. Only relevant if p='same'.
        Default is None.
    dilation : int
        The dilation of the convolution kernel. Only relevant if p='same'.
        Default is 0.

    Returns
    -------
    X_pad : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, padded_in_rows, padded_in_cols, in_channels)`
        The padded output volume.
    p : 4-tuple
        The number of 0-padded rows added to the (top, bottom, left, right) of
        `X`.
    """
    p = pad
    # BUGFIX: resolve 'same' padding *before* calling np.pad. In the previous
    # version np.pad executed first, so a string `pad` was used as a pad
    # width and raised before the 'same' branch could ever run.
    if p == "same" and kernel_shape and stride is not None:
        p = calc_pad_dims_2D(
            X.shape, X.shape[1:3], kernel_shape, stride, dilation=dilation
        )
    if isinstance(p, int):
        p = (p, p, p, p)
    if isinstance(p, tuple) and len(p) == 2:
        # (row_pad, col_pad) -> apply the same amount on both sides
        p = (p[0], p[0], p[1], p[1])
    X_pad = np.pad(
        X,
        pad_width=((0, 0), (p[0], p[1]), (p[2], p[3]), (0, 0)),
        mode="constant",
        constant_values=0,
    )
    return X_pad, p
The provided code snippet includes necessary dependencies for implementing the `conv2D_naive` function. Write a Python function `def conv2D_naive(X, W, stride, pad, dilation=0)` to solve the following problem:
A slow but more straightforward implementation of a 2D "convolution" (technically, cross-correlation) of input `X` with a collection of kernels `W`. Notes ----- This implementation uses ``for`` loops and direct indexing to perform the convolution. As a result, it is slower than the vectorized :func:`conv2D` function that relies on the :func:`col2im` and :func:`im2col` transformations. Parameters ---------- X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)` Input volume. W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_rows, kernel_cols, in_ch, out_ch)` The volume of convolution weights/kernels. stride : int The stride of each convolution kernel. pad : tuple, int, or 'same' The padding amount. If 'same', add padding to ensure that the output of a 2D convolution with a kernel of `kernel_shape` and stride `stride` produces an output volume of the same dimensions as the input. If 2-tuple, specifies the number of padding rows and colums to add *on both sides* of the rows/columns in `X`. If 4-tuple, specifies the number of rows/columns to add to the top, bottom, left, and right of the input volume. dilation : int Number of pixels inserted between kernel elements. Default is 0. Returns ------- Z : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, out_ch)` The covolution of `X` with `W`.
Here is the function:
def conv2D_naive(X, W, stride, pad, dilation=0):
    """
    Reference (loop-based) implementation of the 2D "convolution"
    (technically, cross-correlation) of `X` with the kernel bank `W`.

    Notes
    -----
    Every output element is computed by explicitly slicing the padded input
    and summing the elementwise product with the corresponding kernel. This
    is far slower than the vectorized :func:`conv2D` (which uses
    :func:`im2col`/:func:`col2im`), but easier to verify by inspection.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume.
    W : :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_rows, kernel_cols, in_ch, out_ch)`
        The volume of convolution weights/kernels.
    stride : int
        The stride of each convolution kernel.
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding so the output has the same
        spatial dimensions as the input. If 2-tuple, the number of padding
        rows and columns added *on both sides* of the rows/columns in `X`. If
        4-tuple, the number of rows/columns added to the top, bottom, left,
        and right of the input volume.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    Z : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, out_ch)`
        The convolution of `X` with `W`.
    """
    X_pad, p = pad2D(X, pad, W.shape[:2], stride=stride, dilation=dilation)
    pr1, pr2, pc1, pc2 = p
    fr, fc, in_ch, out_ch = W.shape
    n_ex, in_rows, in_cols, _ = X.shape

    # sampling step and dilated kernel extent
    step = dilation + 1
    fr = fr * step - dilation
    fc = fc * step - dilation

    out_rows = int((in_rows + pr1 + pr2 - fr) / stride + 1)
    out_cols = int((in_cols + pc1 + pc2 - fc) / stride + 1)

    Z = np.zeros((n_ex, out_rows, out_cols, out_ch))
    for m in range(n_ex):
        for c in range(out_ch):
            kernel = W[:, :, :, c]  # hoisted: constant across spatial loop
            for i in range(out_rows):
                r0 = i * stride
                for j in range(out_cols):
                    c0 = j * stride
                    window = X_pad[m, r0 : r0 + fr : step, c0 : c0 + fc : step, :]
                    Z[m, i, j, c] = np.sum(window * kernel)
    return Z
18,195 | import numpy as np
def calc_fan(weight_shape):
    """
    Compute the number of input (`fan_in`) and output (`fan_out`) units for a
    weight matrix or convolution volume.

    Parameters
    ----------
    weight_shape : tuple
        The dimensions of the weight matrix/volume. The final 2 entries must be
        `in_ch`, `out_ch`.

    Returns
    -------
    fan_in : int
        The number of input units in the weight tensor
    fan_out : int
        The number of output units in the weight tensor
    """
    ndim = len(weight_shape)
    if ndim == 2:
        fan_in, fan_out = weight_shape
    elif ndim in (3, 4):
        # convolution volume: each fan scales with the receptive-field size
        in_ch, out_ch = weight_shape[-2:]
        receptive = np.prod(weight_shape[:-2])
        fan_in = in_ch * receptive
        fan_out = out_ch * receptive
    else:
        raise ValueError("Unrecognized weight dimension: {}".format(weight_shape))
    return fan_in, fan_out
The provided code snippet includes necessary dependencies for implementing the `he_uniform` function. Write a Python function `def he_uniform(weight_shape)` to solve the following problem:
Initializes network weights `W` using the He uniform initialization strategy. Notes ----- The He uniform initialization strategy initializes the weights in `W` using draws from Uniform(-b, b) where .. math:: b = \sqrt{\\frac{6}{\\text{fan_in}}} Developed for deep networks with ReLU nonlinearities. Parameters ---------- weight_shape : tuple The dimensions of the weight matrix/volume. Returns ------- W : :py:class:`ndarray <numpy.ndarray>` of shape `weight_shape` The initialized weights.
Here is the function:
def he_uniform(weight_shape):
    """
    Initialize network weights `W` using the He uniform strategy.

    Notes
    -----
    The He uniform initialization strategy draws the weights in `W` from
    Uniform(-b, b), where

    .. math::

        b = \\sqrt{\\frac{6}{\\text{fan_in}}}

    This scheme was developed for deep networks with ReLU nonlinearities.

    Parameters
    ----------
    weight_shape : tuple
        The dimensions of the weight matrix/volume.

    Returns
    -------
    W : :py:class:`ndarray <numpy.ndarray>` of shape `weight_shape`
        The initialized weights.
    """
    fan_in, _ = calc_fan(weight_shape)
    bound = np.sqrt(6 / fan_in)
    return np.random.uniform(-bound, bound, size=weight_shape)
18,196 | import numpy as np
def calc_fan(weight_shape):
    """
    Compute the number of input (`fan_in`) and output (`fan_out`) units for a
    weight matrix or convolution volume.

    Parameters
    ----------
    weight_shape : tuple
        The dimensions of the weight matrix/volume. The final 2 entries must be
        `in_ch`, `out_ch`.

    Returns
    -------
    fan_in : int
        The number of input units in the weight tensor
    fan_out : int
        The number of output units in the weight tensor
    """
    ndim = len(weight_shape)
    if ndim == 2:
        fan_in, fan_out = weight_shape
    elif ndim in (3, 4):
        # convolution volume: each fan scales with the receptive-field size
        in_ch, out_ch = weight_shape[-2:]
        receptive = np.prod(weight_shape[:-2])
        fan_in = in_ch * receptive
        fan_out = out_ch * receptive
    else:
        raise ValueError("Unrecognized weight dimension: {}".format(weight_shape))
    return fan_in, fan_out
def truncated_normal(mean, std, out_shape):
    """
    Generate draws from a truncated normal distribution via rejection sampling.

    Notes
    -----
    Samples are drawn from Normal(`mean`, `std`); any draw falling two or more
    standard deviations from `mean` is redrawn until every sample lies
    strictly within ``(mean - 2 * std, mean + 2 * std)``.

    Parameters
    ----------
    mean : float or array_like of floats
        The mean/center of the distribution
    std : float or array_like of floats
        Standard deviation (spread or "width") of the distribution.
    out_shape : int or tuple of ints
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.

    Returns
    -------
    samples : :py:class:`ndarray <numpy.ndarray>` of shape `out_shape`
        Samples from the truncated normal distribution parameterized by `mean`
        and `std`.
    """
    lo, hi = mean - 2 * std, mean + 2 * std
    samples = np.random.normal(loc=mean, scale=std, size=out_shape)
    out_of_bounds = (samples >= hi) | (samples <= lo)
    while out_of_bounds.any():
        n_bad = out_of_bounds.sum()
        samples[out_of_bounds] = np.random.normal(loc=mean, scale=std, size=n_bad)
        out_of_bounds = (samples >= hi) | (samples <= lo)
    return samples
The provided code snippet includes necessary dependencies for implementing the `he_normal` function. Write a Python function `def he_normal(weight_shape)` to solve the following problem:
Initialize network weights `W` using the He normal initialization strategy. Notes ----- The He normal initialization strategy initializes the weights in `W` using draws from TruncatedNormal(0, b) where the variance `b` is .. math:: b = \\frac{2}{\\text{fan_in}} He normal initialization was originally developed for deep networks with :class:`~numpy_ml.neural_nets.activations.ReLU` nonlinearities. Parameters ---------- weight_shape : tuple The dimensions of the weight matrix/volume. Returns ------- W : :py:class:`ndarray <numpy.ndarray>` of shape `weight_shape` The initialized weights.
Here is the function:
def he_normal(weight_shape):
    """
    Initialize network weights `W` using the He normal strategy.

    Notes
    -----
    The He normal initialization strategy draws the weights in `W` from
    TruncatedNormal(0, b), where the variance `b` is

    .. math::

        b = \\frac{2}{\\text{fan_in}}

    It was originally developed for deep networks with
    :class:`~numpy_ml.neural_nets.activations.ReLU` nonlinearities.

    Parameters
    ----------
    weight_shape : tuple
        The dimensions of the weight matrix/volume.

    Returns
    -------
    W : :py:class:`ndarray <numpy.ndarray>` of shape `weight_shape`
        The initialized weights.
    """
    fan_in, _ = calc_fan(weight_shape)
    scale = np.sqrt(2 / fan_in)
    return truncated_normal(0, scale, weight_shape)
18,197 | import numpy as np
def calc_fan(weight_shape):
    """
    Compute the number of input (`fan_in`) and output (`fan_out`) units for a
    weight matrix or convolution volume.

    Parameters
    ----------
    weight_shape : tuple
        The dimensions of the weight matrix/volume. The final 2 entries must be
        `in_ch`, `out_ch`.

    Returns
    -------
    fan_in : int
        The number of input units in the weight tensor
    fan_out : int
        The number of output units in the weight tensor
    """
    ndim = len(weight_shape)
    if ndim == 2:
        fan_in, fan_out = weight_shape
    elif ndim in (3, 4):
        # convolution volume: each fan scales with the receptive-field size
        in_ch, out_ch = weight_shape[-2:]
        receptive = np.prod(weight_shape[:-2])
        fan_in = in_ch * receptive
        fan_out = out_ch * receptive
    else:
        raise ValueError("Unrecognized weight dimension: {}".format(weight_shape))
    return fan_in, fan_out
The provided code snippet includes necessary dependencies for implementing the `glorot_uniform` function. Write a Python function `def glorot_uniform(weight_shape, gain=1.0)` to solve the following problem:
Initialize network weights `W` using the Glorot uniform initialization strategy. Notes ----- The Glorot uniform initialization strategy initializes weights using draws from ``Uniform(-b, b)`` where: .. math:: b = \\text{gain} \sqrt{\\frac{6}{\\text{fan_in} + \\text{fan_out}}} The motivation for Glorot uniform initialization is to choose weights to ensure that the variance of the layer outputs are approximately equal to the variance of its inputs. This initialization strategy was primarily developed for deep networks with tanh and logistic sigmoid nonlinearities. Parameters ---------- weight_shape : tuple The dimensions of the weight matrix/volume. Returns ------- W : :py:class:`ndarray <numpy.ndarray>` of shape `weight_shape` The initialized weights.
Here is the function:
def glorot_uniform(weight_shape, gain=1.0):
    """
    Initialize network weights `W` using the Glorot uniform strategy.

    Notes
    -----
    Weights are drawn from ``Uniform(-b, b)`` with

    .. math::

        b = \\text{gain} \\sqrt{\\frac{6}{\\text{fan_in} + \\text{fan_out}}}

    The motivation is to pick weights so the variance of a layer's outputs
    approximately matches the variance of its inputs. This strategy was
    primarily developed for deep networks with tanh and logistic sigmoid
    nonlinearities.

    Parameters
    ----------
    weight_shape : tuple
        The dimensions of the weight matrix/volume.

    Returns
    -------
    W : :py:class:`ndarray <numpy.ndarray>` of shape `weight_shape`
        The initialized weights.
    """
    fan_in, fan_out = calc_fan(weight_shape)
    bound = gain * np.sqrt(6 / (fan_in + fan_out))
    return np.random.uniform(-bound, bound, size=weight_shape)
18,198 | import numpy as np
def calc_fan(weight_shape):
    """
    Compute the number of input (`fan_in`) and output (`fan_out`) units for a
    weight matrix or convolution volume.

    Parameters
    ----------
    weight_shape : tuple
        The dimensions of the weight matrix/volume. The final 2 entries must be
        `in_ch`, `out_ch`.

    Returns
    -------
    fan_in : int
        The number of input units in the weight tensor
    fan_out : int
        The number of output units in the weight tensor
    """
    ndim = len(weight_shape)
    if ndim == 2:
        fan_in, fan_out = weight_shape
    elif ndim in (3, 4):
        # convolution volume: each fan scales with the receptive-field size
        in_ch, out_ch = weight_shape[-2:]
        receptive = np.prod(weight_shape[:-2])
        fan_in = in_ch * receptive
        fan_out = out_ch * receptive
    else:
        raise ValueError("Unrecognized weight dimension: {}".format(weight_shape))
    return fan_in, fan_out
def truncated_normal(mean, std, out_shape):
    """
    Generate draws from a truncated normal distribution via rejection sampling.

    Notes
    -----
    Samples are drawn from Normal(`mean`, `std`); any draw falling two or more
    standard deviations from `mean` is redrawn until every sample lies
    strictly within ``(mean - 2 * std, mean + 2 * std)``.

    Parameters
    ----------
    mean : float or array_like of floats
        The mean/center of the distribution
    std : float or array_like of floats
        Standard deviation (spread or "width") of the distribution.
    out_shape : int or tuple of ints
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.

    Returns
    -------
    samples : :py:class:`ndarray <numpy.ndarray>` of shape `out_shape`
        Samples from the truncated normal distribution parameterized by `mean`
        and `std`.
    """
    lo, hi = mean - 2 * std, mean + 2 * std
    samples = np.random.normal(loc=mean, scale=std, size=out_shape)
    out_of_bounds = (samples >= hi) | (samples <= lo)
    while out_of_bounds.any():
        n_bad = out_of_bounds.sum()
        samples[out_of_bounds] = np.random.normal(loc=mean, scale=std, size=n_bad)
        out_of_bounds = (samples >= hi) | (samples <= lo)
    return samples
The provided code snippet includes necessary dependencies for implementing the `glorot_normal` function. Write a Python function `def glorot_normal(weight_shape, gain=1.0)` to solve the following problem:
Initialize network weights `W` using the Glorot normal initialization strategy. Notes ----- The Glorot normal initializaiton initializes weights with draws from TruncatedNormal(0, b) where the variance `b` is .. math:: b = \\frac{2 \\text{gain}^2}{\\text{fan_in} + \\text{fan_out}} The motivation for Glorot normal initialization is to choose weights to ensure that the variance of the layer outputs are approximately equal to the variance of its inputs. This initialization strategy was primarily developed for deep networks with :class:`~numpy_ml.neural_nets.activations.Tanh` and :class:`~numpy_ml.neural_nets.activations.Sigmoid` nonlinearities. Parameters ---------- weight_shape : tuple The dimensions of the weight matrix/volume. Returns ------- W : :py:class:`ndarray <numpy.ndarray>` of shape `weight_shape` The initialized weights.
Here is the function:
def glorot_normal(weight_shape, gain=1.0):
    """
    Initialize network weights `W` using the Glorot normal initialization strategy.
    Notes
    -----
    The Glorot normal initialization initializes weights with draws from
    TruncatedNormal(0, b) where the variance `b` is
    .. math::
        b = \\frac{2 \\text{gain}^2}{\\text{fan_in} + \\text{fan_out}}
    The motivation for Glorot normal initialization is to choose weights to
    ensure that the variance of the layer outputs is approximately equal to
    the variance of its inputs.
    This initialization strategy was primarily developed for deep networks with
    :class:`~numpy_ml.neural_nets.activations.Tanh` and
    :class:`~numpy_ml.neural_nets.activations.Sigmoid` nonlinearities.
    Parameters
    ----------
    weight_shape : tuple
        The dimensions of the weight matrix/volume.
    gain : float
        Multiplicative scale factor applied to the standard deviation of the
        truncated normal. Default is 1.0.
    Returns
    -------
    W : :py:class:`ndarray <numpy.ndarray>` of shape `weight_shape`
        The initialized weights.
    """
    # fan_in/fan_out are derived from the weight shape by the calc_fan helper.
    fan_in, fan_out = calc_fan(weight_shape)
    # std = gain * sqrt(2 / (fan_in + fan_out)), i.e. sqrt of the variance above.
    std = gain * np.sqrt(2 / (fan_in + fan_out))
    return truncated_normal(0, std, weight_shape)
18,199 | import numpy as np
def generalized_cosine(window_len, coefs, symmetric=False):
    """Generalized cosine window: a weighted sum of cosine terms.

    Parameters
    ----------
    window_len : int
        Window length in samples (equal to `frame_width` when applied to a
        windowed signal).
    coefs : list of floats
        The cosine-term coefficients a_k.
    symmetric : bool
        If False (default), produce a 'periodic' window suitable for use with
        an FFT / in spectral analysis. If True, produce a symmetric window
        suitable for, e.g., filter design.

    Returns
    -------
    window : numpy.ndarray of shape `(window_len,)`
        The window.
    """
    # A periodic window is a symmetric window of length window_len + 1 with
    # the final (duplicate) sample dropped.
    n_points = window_len if symmetric else window_len + 1
    theta = np.linspace(-np.pi, np.pi, n_points)
    window = np.zeros(n_points)
    for k, a_k in enumerate(coefs):
        window += a_k * np.cos(k * theta)
    return window if symmetric else window[:-1]
The provided code snippet includes necessary dependencies for implementing the `blackman_harris` function. Write a Python function `def blackman_harris(window_len, symmetric=False)` to solve the following problem:
The Blackman-Harris window. Notes ----- The Blackman-Harris window is an instance of the more general class of cosine-sum windows where `K=3`. Additional coefficients extend the Hamming window to further minimize the magnitude of the nearest side-lobe in the frequency response. .. math:: \\text{bh}(n) = a_0 - a_1 \cos\left(\\frac{2 \pi n}{N}\\right) + a_2 \cos\left(\\frac{4 \pi n }{N}\\right) - a_3 \cos\left(\\frac{6 \pi n}{N}\\right) where `N` = `window_len` - 1, :math:`a_0` = 0.35875, :math:`a_1` = 0.48829, :math:`a_2` = 0.14128, and :math:`a_3` = 0.01168. Parameters ---------- window_len : int The length of the window in samples. Should be equal to the `frame_width` if applying to a windowed signal. symmetric : bool If False, create a 'periodic' window that can be used in with an FFT / in spectral analysis. If True, generate a symmetric window that can be used in, e.g., filter design. Default is False. Returns ------- window : :py:class:`ndarray <numpy.ndarray>` of shape `(window_len,)` The window
Here is the function:
def blackman_harris(window_len, symmetric=False):
    """Blackman-Harris window (4-term cosine-sum, K=3).

    The coefficients a0=0.35875, a1=0.48829, a2=0.14128, a3=0.01168 extend the
    Hamming window to further reduce the magnitude of the nearest side-lobe in
    the frequency response.

    Parameters
    ----------
    window_len : int
        Window length in samples (equal to `frame_width` when applied to a
        windowed signal).
    symmetric : bool
        If False (default), produce a 'periodic' window suitable for use with
        an FFT / in spectral analysis. If True, produce a symmetric window
        suitable for, e.g., filter design.

    Returns
    -------
    window : numpy.ndarray of shape `(window_len,)`
        The window.
    """
    bh_coefs = [0.35875, 0.48829, 0.14128, 0.01168]
    return generalized_cosine(window_len, bh_coefs, symmetric)
18,200 | import numpy as np
def generalized_cosine(window_len, coefs, symmetric=False):
    """Generalized cosine window: a weighted sum of cosine terms.

    Parameters
    ----------
    window_len : int
        Window length in samples (equal to `frame_width` when applied to a
        windowed signal).
    coefs : list of floats
        The cosine-term coefficients a_k.
    symmetric : bool
        If False (default), produce a 'periodic' window suitable for use with
        an FFT / in spectral analysis. If True, produce a symmetric window
        suitable for, e.g., filter design.

    Returns
    -------
    window : numpy.ndarray of shape `(window_len,)`
        The window.
    """
    # A periodic window is a symmetric window of length window_len + 1 with
    # the final (duplicate) sample dropped.
    n_points = window_len if symmetric else window_len + 1
    theta = np.linspace(-np.pi, np.pi, n_points)
    window = np.zeros(n_points)
    for k, a_k in enumerate(coefs):
        window += a_k * np.cos(k * theta)
    return window if symmetric else window[:-1]
The provided code snippet includes necessary dependencies for implementing the `hamming` function. Write a Python function `def hamming(window_len, symmetric=False)` to solve the following problem:
The Hamming window. Notes ----- The Hamming window is an instance of the more general class of cosine-sum windows where `K=1` and :math:`a_0 = 0.54`. Coefficients selected to minimize the magnitude of the nearest side-lobe in the frequency response. .. math:: \\text{hamming}(n) = 0.54 - 0.46 \cos\left(\\frac{2 \pi n}{\\text{window_len} - 1}\\right) Parameters ---------- window_len : int The length of the window in samples. Should be equal to the `frame_width` if applying to a windowed signal. symmetric : bool If False, create a 'periodic' window that can be used in with an FFT / in spectral analysis. If True, generate a symmetric window that can be used in, e.g., filter design. Default is False. Returns ------- window : :py:class:`ndarray <numpy.ndarray>` of shape `(window_len,)` The window
Here is the function:
def hamming(window_len, symmetric=False):
    """Hamming window (cosine-sum with K=1 and a0 = 0.54).

    The coefficients were chosen to minimize the magnitude of the nearest
    side-lobe in the frequency response.

    Parameters
    ----------
    window_len : int
        Window length in samples (equal to `frame_width` when applied to a
        windowed signal).
    symmetric : bool
        If False (default), produce a 'periodic' window suitable for use with
        an FFT / in spectral analysis. If True, produce a symmetric window
        suitable for, e.g., filter design.

    Returns
    -------
    window : numpy.ndarray of shape `(window_len,)`
        The window.
    """
    a0 = 0.54
    # Keep `1 - a0` (not a literal 0.46) so the float value matches exactly.
    return generalized_cosine(window_len, [a0, 1 - a0], symmetric)
18,201 | import numpy as np
def generalized_cosine(window_len, coefs, symmetric=False):
    """Generalized cosine window: a weighted sum of cosine terms.

    Parameters
    ----------
    window_len : int
        Window length in samples (equal to `frame_width` when applied to a
        windowed signal).
    coefs : list of floats
        The cosine-term coefficients a_k.
    symmetric : bool
        If False (default), produce a 'periodic' window suitable for use with
        an FFT / in spectral analysis. If True, produce a symmetric window
        suitable for, e.g., filter design.

    Returns
    -------
    window : numpy.ndarray of shape `(window_len,)`
        The window.
    """
    # A periodic window is a symmetric window of length window_len + 1 with
    # the final (duplicate) sample dropped.
    n_points = window_len if symmetric else window_len + 1
    theta = np.linspace(-np.pi, np.pi, n_points)
    window = np.zeros(n_points)
    for k, a_k in enumerate(coefs):
        window += a_k * np.cos(k * theta)
    return window if symmetric else window[:-1]
The provided code snippet includes necessary dependencies for implementing the `hann` function. Write a Python function `def hann(window_len, symmetric=False)` to solve the following problem:
The Hann window. Notes ----- The Hann window is an instance of the more general class of cosine-sum windows where `K=1` and :math:`a_0` = 0.5. Unlike the Hamming window, the end points of the Hann window touch zero. .. math:: \\text{hann}(n) = 0.5 - 0.5 \cos\left(\\frac{2 \pi n}{\\text{window_len} - 1}\\right) Parameters ---------- window_len : int The length of the window in samples. Should be equal to the `frame_width` if applying to a windowed signal. symmetric : bool If False, create a 'periodic' window that can be used in with an FFT / in spectral analysis. If True, generate a symmetric window that can be used in, e.g., filter design. Default is False. Returns ------- window : :py:class:`ndarray <numpy.ndarray>` of shape `(window_len,)` The window
Here is the function:
def hann(window_len, symmetric=False):
    """Hann window (cosine-sum with K=1 and a0 = 0.5).

    Unlike the Hamming window, the end points of the Hann window touch zero.

    Parameters
    ----------
    window_len : int
        Window length in samples (equal to `frame_width` when applied to a
        windowed signal).
    symmetric : bool
        If False (default), produce a 'periodic' window suitable for use with
        an FFT / in spectral analysis. If True, produce a symmetric window
        suitable for, e.g., filter design.

    Returns
    -------
    window : numpy.ndarray of shape `(window_len,)`
        The window.
    """
    hann_coefs = [0.5, 0.5]
    return generalized_cosine(window_len, hann_coefs, symmetric)
18,202 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `euclidean` function. Write a Python function `def euclidean(x, y)` to solve the following problem:
Compute the Euclidean (`L2`) distance between two real vectors Notes ----- The Euclidean distance between two vectors **x** and **y** is .. math:: d(\mathbf{x}, \mathbf{y}) = \sqrt{ \sum_i (x_i - y_i)^2 } Parameters ---------- x,y : :py:class:`ndarray <numpy.ndarray>` s of shape `(N,)` The two vectors to compute the distance between Returns ------- d : float The L2 distance between **x** and **y**.
Here is the function:
def euclidean(x, y):
    """Euclidean (L2) distance between two real vectors.

    Computes sqrt(sum_i (x_i - y_i)^2).

    Parameters
    ----------
    x, y : numpy.ndarray of shape `(N,)`
        The two vectors to compute the distance between.

    Returns
    -------
    d : float
        The L2 distance between `x` and `y`.
    """
    diff = x - y
    return np.sqrt(np.sum(diff * diff))
18,203 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `manhattan` function. Write a Python function `def manhattan(x, y)` to solve the following problem:
Compute the Manhattan (`L1`) distance between two real vectors Notes ----- The Manhattan distance between two vectors **x** and **y** is .. math:: d(\mathbf{x}, \mathbf{y}) = \sum_i |x_i - y_i| Parameters ---------- x,y : :py:class:`ndarray <numpy.ndarray>` s of shape `(N,)` The two vectors to compute the distance between Returns ------- d : float The L1 distance between **x** and **y**.
Here is the function:
def manhattan(x, y):
    """Manhattan (L1) distance between two real vectors.

    Computes sum_i |x_i - y_i|.

    Parameters
    ----------
    x, y : numpy.ndarray of shape `(N,)`
        The two vectors to compute the distance between.

    Returns
    -------
    d : float
        The L1 distance between `x` and `y`.
    """
    abs_diff = np.abs(x - y)
    return abs_diff.sum()
18,204 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `chebyshev` function. Write a Python function `def chebyshev(x, y)` to solve the following problem:
Compute the Chebyshev (:math:`L_\infty`) distance between two real vectors Notes ----- The Chebyshev distance between two vectors **x** and **y** is .. math:: d(\mathbf{x}, \mathbf{y}) = \max_i |x_i - y_i| Parameters ---------- x,y : :py:class:`ndarray <numpy.ndarray>` s of shape `(N,)` The two vectors to compute the distance between Returns ------- d : float The Chebyshev distance between **x** and **y**.
Here is the function:
def chebyshev(x, y):
    """Chebyshev (L-infinity) distance between two real vectors.

    Computes max_i |x_i - y_i|.

    Parameters
    ----------
    x, y : numpy.ndarray of shape `(N,)`
        The two vectors to compute the distance between.

    Returns
    -------
    d : float
        The Chebyshev distance between `x` and `y`.
    """
    return np.abs(x - y).max()
18,205 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `minkowski` function. Write a Python function `def minkowski(x, y, p)` to solve the following problem:
Compute the Minkowski-`p` distance between two real vectors. Notes ----- The Minkowski-`p` distance between two vectors **x** and **y** is .. math:: d(\mathbf{x}, \mathbf{y}) = \left( \sum_i |x_i - y_i|^p \\right)^{1/p} Parameters ---------- x,y : :py:class:`ndarray <numpy.ndarray>` s of shape `(N,)` The two vectors to compute the distance between p : float > 1 The parameter of the distance function. When `p = 1`, this is the `L1` distance, and when `p=2`, this is the `L2` distance. For `p < 1`, Minkowski-`p` does not satisfy the triangle inequality and hence is not a valid distance metric. Returns ------- d : float The Minkowski-`p` distance between **x** and **y**.
Here is the function:
def minkowski(x, y, p):
    """Minkowski-`p` distance between two real vectors.

    Computes (sum_i |x_i - y_i|^p)^(1/p). `p = 1` gives the L1 distance and
    `p = 2` the L2 distance; for `p < 1` the triangle inequality fails, so the
    result is not a valid distance metric.

    Parameters
    ----------
    x, y : numpy.ndarray of shape `(N,)`
        The two vectors to compute the distance between.
    p : float > 1
        The order of the distance.

    Returns
    -------
    d : float
        The Minkowski-`p` distance between `x` and `y`.
    """
    powered = np.abs(x - y) ** p
    return powered.sum() ** (1 / p)
18,206 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `hamming` function. Write a Python function `def hamming(x, y)` to solve the following problem:
Compute the Hamming distance between two integer-valued vectors. Notes ----- The Hamming distance between two vectors **x** and **y** is .. math:: d(\mathbf{x}, \mathbf{y}) = \\frac{1}{N} \sum_i \mathbb{1}_{x_i \\neq y_i} Parameters ---------- x,y : :py:class:`ndarray <numpy.ndarray>` s of shape `(N,)` The two vectors to compute the distance between. Both vectors should be integer-valued. Returns ------- d : float The Hamming distance between **x** and **y**.
Here is the function:
def hamming(x, y):
    """Hamming distance between two integer-valued vectors.

    Computes the fraction of positions at which `x` and `y` disagree.

    Parameters
    ----------
    x, y : numpy.ndarray of shape `(N,)`
        The two (integer-valued) vectors to compute the distance between.

    Returns
    -------
    d : float
        The Hamming distance between `x` and `y`.
    """
    n = len(x)
    mismatches = np.sum(x != y)
    return mismatches / n
18,207 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `logsumexp` function. Write a Python function `def logsumexp(log_probs, axis=None)` to solve the following problem:
Redefine scipy.special.logsumexp see: http://bayesjumping.net/log-sum-exp-trick/
Here is the function:
def logsumexp(log_probs, axis=None):
    """Numerically stable log(sum(exp(log_probs))), a stand-in for
    scipy.special.logsumexp.

    see: http://bayesjumping.net/log-sum-exp-trick/

    Parameters
    ----------
    log_probs : numpy.ndarray
        Array of log-domain values.
    axis : int, tuple of ints, or None
        Axis (or axes) along which to reduce. None (default) reduces over all
        elements and returns a scalar.

    Returns
    -------
    lse : float or numpy.ndarray
        log(sum(exp(log_probs))) computed along `axis`.
    """
    if axis is None:
        _max = np.max(log_probs)
        return _max + np.log(np.exp(log_probs - _max).sum())
    # Bug fix: the original subtracted the *global* max even for axis
    # reductions, so slices whose values sit far below the global max had
    # exp() underflow to 0 and returned -inf. Stabilize per-slice instead.
    _max = np.max(log_probs, axis=axis, keepdims=True)
    exp_sum = np.exp(log_probs - _max).sum(axis=axis)
    return np.log(exp_sum) + np.squeeze(_max, axis=axis)
18,208 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `log_gaussian_pdf` function. Write a Python function `def log_gaussian_pdf(x_i, mu, sigma)` to solve the following problem:
Compute log N(x_i | mu, sigma)
Here is the function:
def log_gaussian_pdf(x_i, mu, sigma):
    """Return log N(x_i | mu, sigma) for a multivariate Gaussian.

    Parameters
    ----------
    x_i : numpy.ndarray of shape `(N,)`
        The point at which to evaluate the log density.
    mu : numpy.ndarray of shape `(N,)`
        The Gaussian mean.
    sigma : numpy.ndarray of shape `(N, N)`
        The Gaussian covariance matrix.

    Returns
    -------
    log_p : float
        The log density of `x_i` under N(mu, sigma).
    """
    dim = len(mu)
    norm_term = dim * np.log(2 * np.pi)
    _, logdet = np.linalg.slogdet(sigma)
    centered = x_i - mu
    # Mahalanobis term via a linear solve rather than an explicit inverse.
    maha = np.dot(centered, np.linalg.solve(sigma, centered))
    return -0.5 * (norm_term + logdet + maha)
18,209 | import re
from abc import ABC, abstractmethod
import numpy as np
def kernel_checks(X, Y):
    """Normalize and validate kernel inputs.

    Promotes 1-D inputs to column vectors, substitutes `X` for a missing `Y`,
    and asserts both arrays are 2-D with matching column counts.

    Parameters
    ----------
    X : numpy.ndarray
        First input collection (1-D inputs are reshaped to `(N, 1)`).
    Y : numpy.ndarray or None
        Second input collection; if None, `X` is reused.

    Returns
    -------
    X, Y : numpy.ndarray
        The validated, 2-D input arrays.
    """
    if X.ndim == 1:
        X = X.reshape(-1, 1)
    if Y is None:
        Y = X
    if Y.ndim == 1:
        Y = Y.reshape(-1, 1)
    assert X.ndim == 2, "X must have 2 dimensions, but got {}".format(X.ndim)
    assert Y.ndim == 2, "Y must have 2 dimensions, but got {}".format(Y.ndim)
    assert X.shape[1] == Y.shape[1], "X and Y must have the same number of columns"
    return X, Y
18,210 | import re
from abc import ABC, abstractmethod
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `pairwise_l2_distances` function. Write a Python function `def pairwise_l2_distances(X, Y)` to solve the following problem:
A fast, vectorized way to compute pairwise l2 distances between rows in `X` and `Y`. Notes ----- An entry of the pairwise Euclidean distance matrix for two vectors is .. math:: d[i, j] &= \sqrt{(x_i - y_i) @ (x_i - y_i)} \\\\ &= \sqrt{sum (x_i - y_j)^2} \\\\ &= \sqrt{sum (x_i)^2 - 2 x_i y_j + (y_j)^2} The code below computes the the third line using numpy broadcasting fanciness to avoid any for loops. Parameters ---------- X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, C)` Collection of `N` input vectors Y : :py:class:`ndarray <numpy.ndarray>` of shape `(M, C)` Collection of `M` input vectors. If None, assume `Y` = `X`. Default is None. Returns ------- dists : :py:class:`ndarray <numpy.ndarray>` of shape `(N, M)` Pairwise distance matrix. Entry (i, j) contains the `L2` distance between :math:`x_i` and :math:`y_j`.
Here is the function:
def pairwise_l2_distances(X, Y):
    """Vectorized pairwise Euclidean distances between rows of `X` and `Y`.

    Uses the expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 together with
    numpy broadcasting, avoiding any explicit Python loops.

    Parameters
    ----------
    X : numpy.ndarray of shape `(N, C)`
        Collection of `N` input vectors.
    Y : numpy.ndarray of shape `(M, C)`
        Collection of `M` input vectors.

    Returns
    -------
    dists : numpy.ndarray of shape `(N, M)`
        Entry (i, j) holds the L2 distance between `X[i]` and `Y[j]`.
    """
    y_sq = np.sum(Y ** 2, axis=1)
    x_sq = np.sum(X ** 2, axis=1)[:, np.newaxis]
    sq_dists = -2 * X @ Y.T + y_sq + x_sq
    # Numerical imprecision can produce tiny negative squared distances.
    np.clip(sq_dists, 0, None, out=sq_dists)
    return np.sqrt(sq_dists)
18,211 | import ssl
import sys
from gzip import GzipFile
from json import JSONDecoder
from time import time
from urllib.request import Request, urlopen
HOST = 'translate.googleapis.com'
TESTIP_FORMAT = 'https://{}/translate_a/single?client=gtx&sl=en&tl=fr&q=a'
def _build_request(ip, host=HOST, testip_format=TESTIP_FORMAT):
def new_context():
def test_ip(ip, timeout=2.5, host=HOST, testip_format=TESTIP_FORMAT):
    """Time one test request against `ip`.

    Returns the elapsed time in seconds on success; on any failure, returns
    the exception's string representation instead (best-effort probe).
    """
    try:
        req = _build_request(ip, host, testip_format)
        started = time()
        with urlopen(req, timeout=timeout, context=new_context()) as response:
            finished = time()
    except Exception as exc:
        return str(exc)
    return finished - started
18,212 | import ssl
import sys
from gzip import GzipFile
from json import JSONDecoder
from time import time
from urllib.request import Request, urlopen
def _build_request(ip, host=HOST, testip_format=TESTIP_FORMAT):
    """Build a Request that targets `ip` directly while sending `host` as the
    Host header (IPv6 literals are bracketed for the URL)."""
    target = f'[{ip}]' if ':' in ip else ip
    req = Request(testip_format.format(target))
    req.add_header('Host', host)
    return req
def new_context():
    """Return an SSL context whose wrap_socket always sends HOST as the SNI
    server_hostname, regardless of the hostname in the URL.

    On macOS an unverified context is used; elsewhere the default verified
    context applies.
    """
    if sys.platform.startswith('darwin'):
        ctx = ssl._create_unverified_context()
    else:
        ctx = ssl.create_default_context()
    original_wrap = ctx.wrap_socket

    def sni_wrap_socket(socket, **kwargs):
        # Force SNI to HOST so the certificate check matches the real host.
        kwargs["server_hostname"] = HOST
        return original_wrap(socket, **kwargs)

    ctx.wrap_socket = sni_wrap_socket
    return ctx
def check_ip(ip, timeout=2.5):
    """Return True if `ip` answers the test request within `timeout` seconds.

    Best-effort reachability probe: any network/SSL/HTTP failure yields False.
    """
    try:
        # Close immediately; only reachability matters, not the body.
        urlopen(_build_request(ip), timeout=timeout, context=new_context()).close()
    except Exception:
        # Bug fix: was a bare `except:`, which also swallows KeyboardInterrupt
        # and SystemExit; narrow to Exception so Ctrl-C still aborts a scan.
        return False
    return True
18,213 | import ssl
import sys
from gzip import GzipFile
from json import JSONDecoder
from time import time
from urllib.request import Request, urlopen
def time_repr(secs):
    """Format a duration in seconds as 'NNNms' below ~1s, else 'N.NNs'."""
    # 0.9995 is the threshold where '%.2f' seconds would round up to 1.00.
    if secs < 0.9995:
        return f'{secs * 1000:.0f}ms'
    return f'{secs:.2f}s'
18,214 | import ssl
import sys
from gzip import GzipFile
from json import JSONDecoder
from time import time
from urllib.request import Request, urlopen
HOST = 'translate.googleapis.com'
def dns_query(name=HOST, server='1.1.1.1', type='A', path='/dns-query'):
    """Resolve `name` via DNS-over-HTTPS against `server`.

    Returns the list of answer 'data' fields, or an empty list when the
    reply carries no Answer section.
    """
    # https://github.com/stamparm/python-doh/blob/master/client.py
    url = f'https://{server}{path}?name={name}&type={type}'
    req = Request(url, headers={'Accept': 'application/dns-json'})
    raw = urlopen(req).read().decode()
    reply = JSONDecoder().decode(raw)
    if 'Answer' not in reply:
        return []
    return [record['data'] for record in reply['Answer']]
18,215 | import ssl
import sys
from gzip import GzipFile
from json import JSONDecoder
from time import time
from urllib.request import Request, urlopen
def read_url(url, timeout=3.5):
    """Fetch *url* and return the set of its stripped text lines.

    Advertises gzip support and transparently decompresses the body
    when the server answers with Content-Encoding: gzip.
    """
    req = Request(url)
    req.add_header('Accept-Encoding', 'gzip')
    with urlopen(req, timeout=timeout) as resp:
        gzipped = resp.getheader('Content-Encoding') == 'gzip'
        if not gzipped:
            return {line.decode('utf-8').strip() for line in resp}
        with GzipFile(fileobj=resp) as body:
            return {line.decode('utf-8').strip() for line in body}
18,216 | from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x08@\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\
\x00\x00\x08\x07IDATh\x81\xed\x99[lT\xc7\
\x19\xc7\x7f3s\xce\xae\xbdxms\xb1]\xdb\x5cJ\
L[\xdc\xa4P\xc4\x1d\xdbA\xe1b\x9aJTJ+\
\x15\x94\x87\xcaBM\xd5*\x0f}\xa8\x94\x87\x22U4\
B\x8a*\xf5\x89V4Bj\x14\xa5UPy\xe9M\
\xc9\x03i*(PB \x04\x92P\xf0\xd2@ &\
\xd8\xc6\xd7\xc5\xb7\xf5\x9e3\xd3\x87\xbdx\xf7\xec9\xbb\
k\xb7\xa6/\xf9K\xa33;\xb7\xf3\xff\xcf\xf7\xcd\xcc\
wf\xe1s\xfc\x7f!2\x19c\x8c8\xf0\x9b\x91\xae\
\x87\x09yXH\xab\x093\xd3\xc8x\x9f9ux\xca\
2UM\xe1\xf1\xe8\xd1\xe7\xeb\xc7\xfe\xe7\x8c=\x90\x99\
L\xd7\xaf\x87\xba\xc6\x92\xa1W\x84\xb4\x9a2D\x82R\
.\x82\xda\xf4\x0cMt\xce7y\xc8\x110\xeeX\x87\
\xfd\x1a\xf8LvYu\xae\xac\xfc\xf1\x5cI\xcd\x06Y\
\x01\x99\x99\x87\x00\x17\x99\xe5\xc0VE\xd5\x93]\x87\xde\
\xaf\x9d3\xb32\x91\x15`\xccL\xca\x96\x959\x88\x9f\
`\x80~\xb7j\xde\xddH\x06U\x98\x80|1U\xde\
*QQ\xfd\xfc\x5cH\xcd\x06\x05\x02\xfc\x16\xaa\x1f\xca\
i\xf3(\xdch\xc6\x85\x08\xdea\xf2\x0a\xbcm\xfc\x94\
\xe4\x94=H\xd6\xec\x9e;\xbd\xd2\xb0\xfc\x0a}g\xb7\
\xc4\xda\x08*\xabo\xaa=\xf1\xde\xe5\x18\xb55\x95\x84\
+,\x94\x04!\x04&Gy&o|f\xc3\xd3\xee\
\xbe\xe3\x98\x9f\xac\x5c\xb9\xfcx\xa6\xac\xc0\x02\xb3!_\
j\xf6\x01\x06\xa7\xc2\xdc\xea\x9b\x221\x9dL\xd7\x09\x84\
\x98[\x92R6\x86B\xea\xf5X\xec\xe3g\x0b\x04\xf8\
\xc2\xa3(\x88|)\x1d7\x06\xaa\x18\x19\x8939\x95\
(\x10\xed7\xeb\xa5\x10\x0a\xa9_d\xf23.Tb\
\x1c\x03\xd4F\x0c\xbb[5k\x96i\x1a\xa2\x06!\xa0\
?.\xb8\xd2#9yM2<.|\xfb\xde\x1c\x89\
\xb2\xdd\x19\xe1a|\x8cp\xd8FJ5k\xd2\xb9\xb0\
m{i\xa1\x80\x22\xc4\x01\xb6\xb4h\xba\xb6\xb9\x84T\
\xbe\xd2\xc6ZCc\xad\xcb\x8e\xd5./\x9f\xb2x\xff\
\xae,\xb0\xd4p\x22B\xdf\x98E8\xac\x89\x8f\xc6\xa9\
\xa9\x89b\xdb%_\x1d\xcc)\xc7j\xd9Q\x8a\x19`\
K\x8b\xe6\xb9\x0e'\x1b\xf9\xf5\xc5\x05\xb1\xbe\x94\xf7}\
\xb9A\xd3Pm\x18\x9d\x80\xbbC\xf9\x16\xc8\xf5\x8e\x1b\
\x03U4V\x8f\xe2\xb8\x92\xb1\xb1q\xaa\xab\xabP\xea\
\xbf\xb3D\x9e\x80 \xd4F\x0c]\xdb\x5c\x04\xe0jx\
\xed\xbc\xe2\x1f1\x95\x15,P\xb4\xafr\xf9\xa0G2\
:)\x02\xd7Cl8\xcav=\x82v\x0d\xda\x18\xc6\
\xc7\xc7\x89D\x22Y\x11sY\x0b\x10\xb0\x88s\xc3\x8a\
]\xad:\xeb6\x7f\xb8hq:\x87|\xa6\xed\x99\x9b\
\xaa\x80\xbc\x97\xcf\xf0t\x84\xbe\x87\x16IG\xa3]\x8d\
\xd6\x86\x89\x89\x09\x1c\xc7\x09$W\x8e\xa8\xa2\xb1\x10\xc0\
\xdae\x1a\x80\xd1)\xc1\xdf\xae{\xf4\xcer\x87\xba1\
XE2\xa9q]\x9d~\x97ajj\xaa\xa8\x88R\
(\xeaB\x06h\x88\xa6(\xdc\xea\x17h\xcf\x97\xcd\xab\
\x07\xa6}\xfb]\xfcD\xf2\xab\xb7\xad\x82S<6\x1c\
\xe5Iw\x047\xedF\x22-\xe2\xd0\xb1\xd7\x11\xca\xe6\
\xc5\x1f\xec/I\xd8k\x15\xdf\x83\xac\xe8\x81V\x86\xab\
\xda\xca?\x04I\xedF\x0a\xc7M\xb9Q.\x17\xed:\
\xb8\xae\x1bH4\x08%\x17qo\x5c\xb0t\xa1\xa1\xa5\
\xde %h=\xc3\xe9\x95\xb33\xbbH$\x04\xfb7\
\xa5\x08\x04\x9d\x07\x00\xc7\xcf\x0d\xb0 q\x09ii\x94\
\x12\x08\x01\xa9/[\xc3\xa1c\xc7\x91\x96B\x08\xe9q\
C\xc3\xcf\xbf\xbf\xcfw<\x99\xd3\xca7]\xfd4\xd5\
\xa4\xba\xc2\xb0\xfb\xab:\xcf\x08\xa7c\x8a\xd31\xc5\xa9\
n\x85\xcaY\x1e\xdd}9\x02<\xeb\xc1\x89\xacB\x00\
\x12\x81\x90)\x01B\x0a\xa4\x14\x08\x09\x18\x8d1:\x8f\
|6\xefc\x95\x92\x168yM\xb2\xab\xd5%l\xc3\
w78$]\x8b\xbf\xdf\x90y\x0b~\xdb*\xcd3\
\xebR\xb3\x1f\x9f\x84\x8b\xb7\x85/y\x00\x1dZ\xc2\xde\
\xa7\xbe\xc1\xf2%\x10\xa9\xb4\xb0m\x85J\xab\x17)s\
\xf0\xc7S\x17\x88\xdd\xeb\xe3[\xed\x1bY\xdf\xdaR\x94\
_\xc9htdR\xf0\xdb\xb3\x16?z\xcaAI\xf8\
\xdeV\x87\xa7\x9f\x10\xc4\xfa\x04\xc6@K\xbd\xa1\xb1f\
\xa6\xc7k\xefXL\xbb\xa2\xc8\x0ee\xb81\x18\xa5\xb1\
f\x98i\x0b\x94\x92\xe4\x9eg\x0f'&\x89\xf5\xf4\x22\
\x04|iyc^\xbf\xa2\x02\x8a-\x99\x0b\xb7%`\
q\xa0\xdd\xa1\xc2\x86\xba\xa8\xa1.\x9a\xdf#\xe9\xc2\xef\
\xcf+.|\x9c\xbf\xd5z\xc9\x03t\x0fG\x99<\x7f\
\x8a{\x0f\xfaX\xffx\x0b{6\xafA\xcaT\xbfs\
W\xaf\xa3\xb5\xe6k\x8f-'\xa4$Z\xebl]Q\
\x01~\xc8\x15\xfd\xce-\xc9\xf5\xfb6\x9d\x8fk\xbe\xbe\
L\xf3\x85\x1a\x10\x18\xfa\xc7\x04\x1f~*x\xeb\xbaE\
\xdf\xa8\xa7\xbf\x0fy\x80\xa1D%\xdbW\xac\xa6\xe7\xc1\
\x03\xaet\xdfb\xcd\xaa\xe54\xd7-\xe2\xb3\x81a\xae\
\xdc\xbc\x83\x94\x92\xb65_\xc1\x18C2\x99\xc4\xb2\xac\
\x13\xc3\x94\xb8\xc5\xc8\xfb(I?\xef'\x1ax\xe2\x8b\
\xcb\xf9\xe8\xcem\xde8{\x99g\xf7\xb4\xf3\xd73\xef\
b\x8caK\xeb*\x16\xd5D\xb3}\x93\xc9$\xb6m\
\xfb\x8a(\xfe=\x90\xf3\xd2r>v\xf0i\xe7G\x1e\
6\x5c\xc3\xfa\xd5-,\x8eF\x19\x88\xc79\xf6\xe7\
\x93\x0c\xc6\xc7\xa9[XM\xc7\xba\xd6\x82\xfe\x8e\xe3\xa0\
\xd3{x\xee\x98E\x0f\xb2\x92\x07\x9a\x0f\xf9\xbc\xdf\x01\
\xe4\x01\x86\xa6*\x18\x9c\x0c\xb3{\xf3:0\x82\xa9\x84\
\x83\x94\x92\xefl\xdf\x8cJ\xcf\xb4w\xe1\xba\xae\x9b\x15\
Q \xa0$\x8a(\xf2\x9b\xf5b\xe43\x05\xff\xea_\
\xc0\xbb\x1f\xdd\xc4uS\xa4\x5c\xedr\xfeZ\xac\xe8)\
\xec\x15Q\xf2 +\x16>\xf8Uy_\x1eD\x1e\xe0\
Jo\x05\xddw{\x09)\x8b\xdd\x9b\xd6\x12\xb6-\xae\
\xdc\xfc\x847\xffy9+\xca\x0f\xfe\x02\xcaD\x90&\
\xbfY/\x88\x87<gCB\xd6\x22\xec%\xecm\xdb\
\xc4\x9a\x96\x15\xec\xdb\xd1N\xc8\xb6\xb9\xfa\xef;\xbcq\
\xee=\xb4\x0e\xb6DY\x02\xca1\x84\x978~\xed\x8a\
\xac\x95\xe6\xc7\xdaXR\x1b\xc5q5\x8dK\x16\xb2o\
\xe76\xc2\xb6\xcd\x87\xb7\xee\xf2\x97\xb3\x97\xf2f\xdb\xef\
]\xb3\x8bF=\x03\xf9\x11\x9f\x0dyc\xe0\xded}\
*\xbcN\x87\xd8\xcdu\x8b\xd8\xb7s+\xa1\x90\xc5\xb5\
\xdbw\xf9\xd3\x99\x8b\xbe\xef*\x10\xe0&\x93\xbd\xe5\x10\
\xf6\xbd|\x0a\x22n\x82\xdbd\x86\x19JTp?\xae\
\x98N\xba\xd9\x10\xbb\xb9~1\xfbw\xb4\x11\xb2,\xea\
k\xaa\xf3x\x04\x0a\x90L\xbc\xe0%ZLy\xa0\xa5\
\xca\xda^\xf3\xeb\xae\x0fDq\x1c\x8d\xabM\xb6ni\
\xc3b~\xf8L'mkW\x07\x92\xcf\x13\xf0\xe6O\
\x9b~\x97\x1c\xe9\x7f\xce\x99\x9e\xea5\xda\xc1/\xe9\x9c\
TP\xef\xa6S^\xfb$Z'1\xe9\xa4\xddT\xca\
\xfeN?\xbb\x07\x17`\x8c(\xd8y\xaa*+\x8a\x92\
\x87\x9c\xff\xc8\xe6\x03/\xfc\xec\xed\xc3\x17?xx\xd0\
\x97\x84\x99yn\xde\x18a\xef\xaez\x16T\xdaTT\
X\xd9\xf0:K\xd2\xe7\x9aq\xe5\xca\x95\x02\xe6\xb0\x8d\
\xce\x06\xf1\x89\xd1#BY\x08e!-;\x95T:\
\xa5\xf3\x1d[\xaa\xd9\xbb\xb3\x8e\x90-\xb1,\x95w\xf1\
\x1b\xe8\xbe~\xa1\xc4|\xe0\xe5_~\xbb\x7fY\x935\
\x94D(\x89\x90\xe9g:\xdf\xbee\x01O\xefX\
D(\xac\x08\x87\x14\xa9\x08\xa2|\xf2\xf3.\x00`\xc5\
\xd2\xc8\x11a@\x98\x94\xbf\x0aR\xf9\x8e\x0d\x95\xec\xe9\
\xa8\xc6R`+\x81R2\xf5Y\xe9\xb9z/F\x1e\
\x1e\x81\x80\xe4\xc8\xf4\xd1\x1c\xea\x18\x0dm\x1b*\xe9\xdc\
^C\xc8V\xd8\xb6\xc2\xb2U\x9a|\xf93\x9f\xc1\xbc\
\x0bx\xe9\xa5]\x83\xcd\x8dj\xc0\x18\x83\xd1\x86\xb6\x8d\
\x95t>Y\x8d\xa5\x04J\x09\x94%\x91\x22\x98\xbc\xdf\
V\x9eHL\xdf\x7fd\x02\x00\x9a\x1b\xac#\x18h\xdf\
XIgG\x14K\x09,Kf\xc9C\xc0\x01\x190\
\xebwzF\x0ef\xf2s\xbf\xe3\x9e\x05\xcc\x98>\xda\
\xb6\xc1~q\xc7\xd6J0\xa9\xdb\x0b\x83@\xbb\x06\x8c\
\xce\xdb\x1e\xbd\xc8\xadK$\x12\xbd\x9f\xf5M\x1c\xfc\xe6\
\x9em\xaf>\x0a\xde\x9f\xa3\x1c\xfc\x07N\xa7\x86X,\
q\xd8c\x00\x00\x00\x00IEND\xaeB`\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03}\xc3\
\x00i\
\x00m\x00a\x00g\x00e\x00s\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x08\
\x0aaZ\xa7\
\x00i\
\x00c\x00o\x00n\x00.\x00p\x00n\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x8drO\xf5\xf0\
"
def qInitResources():
    """Register the embedded icon data above with Qt's resource system.

    0x03 is the rcc format version these tables were generated with.
    """
    QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
18,217 | from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x08@\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\
\x00\x00\x08\x07IDATh\x81\xed\x99[lT\xc7\
\x19\xc7\x7f3s\xce\xae\xbdxms\xb1]\xdb\x5cJ\
L[\xdc\xa4P\xc4\x1d\xdbA\xe1b\x9aJTJ+\
\x15\x94\x87\xcaBM\xd5*\x0f}\xa8\x94\x87\x22U4\
B\x8a*\xf5\x89V4Bj\x14\xa5UPy\xe9M\
\xc9\x03i*(PB \x04\x92P\xf0\xd2@ &\
\xd8\xc6\xd7\xc5\xb7\xf5\x9e3\xd3\x87\xbdx\xf7\xec9\xbb\
k\xb7\xa6/\xf9K\xa33;\xb7\xf3\xff\xcf\xf7\xcd\xcc\
wf\xe1s\xfc\x7f!2\x19c\x8c8\xf0\x9b\x91\xae\
\x87\x09yXH\xab\x093\xd3\xc8x\x9f9ux\xca\
2UM\xe1\xf1\xe8\xd1\xe7\xeb\xc7\xfe\xe7\x8c=\x90\x99\
L\xd7\xaf\x87\xba\xc6\x92\xa1W\x84\xb4\x9a2D\x82R\
.\x82\xda\xf4\x0cMt\xce7y\xc8\x110\xeeX\x87\
\xfd\x1a\xf8LvYu\xae\xac\xfc\xf1\x5cI\xcd\x06Y\
\x01\x99\x99\x87\x00\x17\x99\xe5\xc0VE\xd5\x93]\x87\xde\
\xaf\x9d3\xb32\x91\x15`\xccL\xca\x96\x959\x88\x9f\
`\x80~\xb7j\xde\xddH\x06U\x98\x80|1U\xde\
*QQ\xfd\xfc\x5cH\xcd\x06\x05\x02\xfc\x16\xaa\x1f\xca\
i\xf3(\xdch\xc6\x85\x08\xdea\xf2\x0a\xbcm\xfc\x94\
\xe4\x94=H\xd6\xec\x9e;\xbd\xd2\xb0\xfc\x0a}g\xb7\
\xc4\xda\x08*\xabo\xaa=\xf1\xde\xe5\x18\xb55\x95\x84\
+,\x94\x04!\x04&Gy&o|f\xc3\xd3\xee\
\xbe\xe3\x98\x9f\xac\x5c\xb9\xfcx\xa6\xac\xc0\x02\xb3!_\
j\xf6\x01\x06\xa7\xc2\xdc\xea\x9b\x221\x9dL\xd7\x09\x84\
\x98[\x92R6\x86B\xea\xf5X\xec\xe3g\x0b\x04\xf8\
\xc2\xa3(\x88|)\x1d7\x06\xaa\x18\x19\x8939\x95\
(\x10\xed7\xeb\xa5\x10\x0a\xa9_d\xf23.Tb\
\x1c\x03\xd4F\x0c\xbb[5k\x96i\x1a\xa2\x06!\xa0\
?.\xb8\xd2#9yM2<.|\xfb\xde\x1c\x89\
\xb2\xdd\x19\xe1a|\x8cp\xd8FJ5k\xd2\xb9\xb0\
m{i\xa1\x80\x22\xc4\x01\xb6\xb4h\xba\xb6\xb9\x84T\
\xbe\xd2\xc6ZCc\xad\xcb\x8e\xd5./\x9f\xb2x\xff\
\xae,\xb0\xd4p\x22B\xdf\x98E8\xac\x89\x8f\xc6\xa9\
\xa9\x89b\xdb%_\x1d\xcc)\xc7j\xd9Q\x8a\x19`\
K\x8b\xe6\xb9\x0e'\x1b\xf9\xf5\xc5\x05\xb1\xbe\x94\xf7}\
\xb9A\xd3Pm\x18\x9d\x80\xbbC\xf9\x16\xc8\xf5\x8e\x1b\
\x03U4V\x8f\xe2\xb8\x92\xb1\xb1q\xaa\xab\xabP\xea\
\xbf\xb3D\x9e\x80 \xd4F\x0c]\xdb\x5c\x04\xe0jx\
\xed\xbc\xe2\x1f1\x95\x15,P\xb4\xafr\xf9\xa0G2\
:)\x02\xd7Cl8\xcav=\x82v\x0d\xda\x18\xc6\
\xc7\xc7\x89D\x22Y\x11sY\x0b\x10\xb0\x88s\xc3\x8a\
]\xad:\xeb6\x7f\xb8hq:\x87|\xa6\xed\x99\x9b\
\xaa\x80\xbc\x97\xcf\xf0t\x84\xbe\x87\x16IG\xa3]\x8d\
\xd6\x86\x89\x89\x09\x1c\xc7\x09$W\x8e\xa8\xa2\xb1\x10\xc0\
\xdae\x1a\x80\xd1)\xc1\xdf\xae{\xf4\xcer\x87\xba1\
XE2\xa9q]\x9d~\x97ajj\xaa\xa8\x88R\
(\xeaB\x06h\x88\xa6(\xdc\xea\x17h\xcf\x97\xcd\xab\
\x07\xa6}\xfb]\xfcD\xf2\xab\xb7\xad\x82S<6\x1c\
\xe5Iw\x047\xedF\x22-\xe2\xd0\xb1\xd7\x11\xca\xe6\
\xc5\x1f\xec/I\xd8k\x15\xdf\x83\xac\xe8\x81V\x86\xab\
\xda\xca?\x04I\xedF\x0a\xc7M\xb9Q.\x17\xed:\
\xb8\xae\x1bH4\x08%\x17qo\x5c\xb0t\xa1\xa1\xa5\
\xde %h=\xc3\xe9\x95\xb33\xbbH$\x04\xfb7\
\xa5\x08\x04\x9d\x07\x00\xc7\xcf\x0d\xb0 q\x09ii\x94\
\x12\x08\x01\xa9/[\xc3\xa1c\xc7\x91\x96B\x08\xe9q\
C\xc3\xcf\xbf\xbf\xcfw<\x99\xd3\xca7]\xfd4\xd5\
\xa4\xba\xc2\xb0\xfb\xab:\xcf\x08\xa7c\x8a\xd31\xc5\xa9\
n\x85\xcaY\x1e\xdd}9\x02<\xeb\xc1\x89\xacB\x00\
\x12\x81\x90)\x01B\x0a\xa4\x14\x08\x09\x18\x8d1:\x8f\
|6\xefc\x95\x92\x168yM\xb2\xab\xd5%l\xc3\
w78$]\x8b\xbf\xdf\x90y\x0b~\xdb*\xcd3\
\xebR\xb3\x1f\x9f\x84\x8b\xb7\x85/y\x00\x1dZ\xc2\xde\
\xa7\xbe\xc1\xf2%\x10\xa9\xb4\xb0m\x85J\xab\x17)s\
\xf0\xc7S\x17\x88\xdd\xeb\xe3[\xed\x1bY\xdf\xdaR\x94\
_\xc9htdR\xf0\xdb\xb3\x16?z\xcaAI\xf8\
\xdeV\x87\xa7\x9f\x10\xc4\xfa\x04\xc6@K\xbd\xa1\xb1f\
\xa6\xc7k\xefXL\xbb\xa2\xc8\x0ee\xb81\x18\xa5\xb1\
f\x98i\x0b\x94\x92\xe4\x9eg\x0f'&\x89\xf5\xf4\x22\
\x04|iyc^\xbf\xa2\x02\x8a-\x99\x0b\xb7%`\
q\xa0\xdd\xa1\xc2\x86\xba\xa8\xa1.\x9a\xdf#\xe9\xc2\xef\
\xcf+.|\x9c\xbf\xd5z\xc9\x03t\x0fG\x99<\x7f\
\x8a{\x0f\xfaX\xffx\x0b{6\xafA\xcaT\xbfs\
W\xaf\xa3\xb5\xe6k\x8f-'\xa4$Z\xebl]Q\
\x01~\xc8\x15\xfd\xce-\xc9\xf5\xfb6\x9d\x8fk\xbe\xbe\
L\xf3\x85\x1a\x10\x18\xfa\xc7\x04\x1f~*x\xeb\xbaE\
\xdf\xa8\xa7\xbf\x0fy\x80\xa1D%\xdbW\xac\xa6\xe7\xc1\
\x03\xaet\xdfb\xcd\xaa\xe54\xd7-\xe2\xb3\x81a\xae\
\xdc\xbc\x83\x94\x92\xb65_\xc1\x18C2\x99\xc4\xb2\xac\
\x13\xc3\x94\xb8\xc5\xc8\xfb(I?\xef'\x1ax\xe2\x8b\
\xcb\xf9\xe8\xcem\xde8{\x99g\xf7\xb4\xf3\xd73\xef\
b\x8caK\xeb*\x16\xd5D\xb3}\x93\xc9$\xb6m\
\xfb\x8a(\xfe=\x90\xf3\xd2r>v\xf0i\xe7G\x1e\
6\x5c\xc3\xfa\xd5-,\x8eF\x19\x88\xc79\xf6\xe7\
\x93\x0c\xc6\xc7\xa9[XM\xc7\xba\xd6\x82\xfe\x8e\xe3\xa0\
\xd3{x\xee\x98E\x0f\xb2\x92\x07\x9a\x0f\xf9\xbc\xdf\x01\
\xe4\x01\x86\xa6*\x18\x9c\x0c\xb3{\xf3:0\x82\xa9\x84\
\x83\x94\x92\xefl\xdf\x8cJ\xcf\xb4w\xe1\xba\xae\x9b\x15\
Q \xa0$\x8a(\xf2\x9b\xf5b\xe43\x05\xff\xea_\
\xc0\xbb\x1f\xdd\xc4uS\xa4\x5c\xedr\xfeZ\xac\xe8)\
\xec\x15Q\xf2 +\x16>\xf8Uy_\x1eD\x1e\xe0\
Jo\x05\xddw{\x09)\x8b\xdd\x9b\xd6\x12\xb6-\xae\
\xdc\xfc\x847\xffy9+\xca\x0f\xfe\x02\xcaD\x90&\
\xbfY/\x88\x87<gCB\xd6\x22\xec%\xecm\xdb\
\xc4\x9a\x96\x15\xec\xdb\xd1N\xc8\xb6\xb9\xfa\xef;\xbcq\
\xee=\xb4\x0e\xb6DY\x02\xca1\x84\x978~\xed\x8a\
\xac\x95\xe6\xc7\xdaXR\x1b\xc5q5\x8dK\x16\xb2o\
\xe76\xc2\xb6\xcd\x87\xb7\xee\xf2\x97\xb3\x97\xf2f\xdb\xef\
]\xb3\x8bF=\x03\xf9\x11\x9f\x0dyc\xe0\xded}\
*\xbcN\x87\xd8\xcdu\x8b\xd8\xb7s+\xa1\x90\xc5\xb5\
\xdbw\xf9\xd3\x99\x8b\xbe\xef*\x10\xe0&\x93\xbd\xe5\x10\
\xf6\xbd|\x0a\x22n\x82\xdbd\x86\x19JTp?\xae\
\x98N\xba\xd9\x10\xbb\xb9~1\xfbw\xb4\x11\xb2,\xea\
k\xaa\xf3x\x04\x0a\x90L\xbc\xe0%ZLy\xa0\xa5\
\xca\xda^\xf3\xeb\xae\x0fDq\x1c\x8d\xabM\xb6ni\
\xc3b~\xf8L'mkW\x07\x92\xcf\x13\xf0\xe6O\
\x9b~\x97\x1c\xe9\x7f\xce\x99\x9e\xea5\xda\xc1/\xe9\x9c\
TP\xef\xa6S^\xfb$Z'1\xe9\xa4\xddT\xca\
\xfeN?\xbb\x07\x17`\x8c(\xd8y\xaa*+\x8a\x92\
\x87\x9c\xff\xc8\xe6\x03/\xfc\xec\xed\xc3\x17?xx\xd0\
\x97\x84\x99yn\xde\x18a\xef\xaez\x16T\xdaTT\
X\xd9\xf0:K\xd2\xe7\x9aq\xe5\xca\x95\x02\xe6\xb0\x8d\
\xce\x06\xf1\x89\xd1#BY\x08e!-;\x95T:\
\xa5\xf3\x1d[\xaa\xd9\xbb\xb3\x8e\x90-\xb1,\x95w\xf1\
\x1b\xe8\xbe~\xa1\xc4|\xe0\xe5_~\xbb\x7fY\x935\
\x94D(\x89\x90\xe9g:\xdf\xbee\x01O\xefX\
D(\xac\x08\x87\x14\xa9\x08\xa2|\xf2\xf3.\x00`\xc5\
\xd2\xc8\x11a@\x98\x94\xbf\x0aR\xf9\x8e\x0d\x95\xec\xe9\
\xa8\xc6R`+\x81R2\xf5Y\xe9\xb9z/F\x1e\
\x1e\x81\x80\xe4\xc8\xf4\xd1\x1c\xea\x18\x0dm\x1b*\xe9\xdc\
^C\xc8V\xd8\xb6\xc2\xb2U\x9a|\xf93\x9f\xc1\xbc\
\x0bx\xe9\xa5]\x83\xcd\x8dj\xc0\x18\x83\xd1\x86\xb6\x8d\
\x95t>Y\x8d\xa5\x04J\x09\x94%\x91\x22\x98\xbc\xdf\
V\x9eHL\xdf\x7fd\x02\x00\x9a\x1b\xac#\x18h\xdf\
XIgG\x14K\x09,Kf\xc9C\xc0\x01\x190\
\xebwzF\x0ef\xf2s\xbf\xe3\x9e\x05\xcc\x98>\xda\
\xb6\xc1~q\xc7\xd6J0\xa9\xdb\x0b\x83@\xbb\x06\x8c\
\xce\xdb\x1e\xbd\xc8\xadK$\x12\xbd\x9f\xf5M\x1c\xfc\xe6\
\x9em\xaf>\x0a\xde\x9f\xa3\x1c\xfc\x07N\xa7\x86X,\
q\xd8c\x00\x00\x00\x00IEND\xaeB`\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03}\xc3\
\x00i\
\x00m\x00a\x00g\x00e\x00s\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x08\
\x0aaZ\xa7\
\x00i\
\x00c\x00o\x00n\x00.\x00p\x00n\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x8drO\xf5\xf0\
"
def qCleanupResources():
    """Unregister the embedded icon data above from Qt's resource system.

    Mirror of qInitResources; 0x03 is the rcc format version.
    """
    QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
18,218 | from io import StringIO
from json import loads
from glob import glob
from pathlib import Path
from pytablewriter import MarkdownTableWriter
def print_md_table(settings) -> MarkdownTableWriter:
    """Build a Markdown table writer summarising *settings*.

    Each entry becomes a row: backtick-quoted name, backtick-quoted
    default (blank when the default is empty), context, whether multiple
    values are allowed, and the help text.
    """
    rows = []
    for name, info in settings.items():
        default = info["default"]
        rows.append([
            f"`{name}`",
            f"`{default}`" if default != "" else "",
            info["context"],
            "yes" if "multiple" in info else "no",
            info["help"],
        ])
    return MarkdownTableWriter(
        headers=["Setting", "Default", "Context", "Multiple", "Description"],
        value_matrix=rows,
    )
18,219 | from io import StringIO
from json import loads
from glob import glob
from pathlib import Path
from pytablewriter import MarkdownTableWriter
def stream_support(support) -> str:
    """Render the STREAM-support badge for the docs.

    'no' -> :x:, 'yes' -> :white_check_mark:, anything else -> :warning:.
    """
    icons = {"no": ":x:", "yes": ":white_check_mark:"}
    return "STREAM support " + icons.get(support, ":warning:")
18,220 |
The provided code snippet includes necessary dependencies for implementing the `toc` function. Write a Python function `def toc(obj)` to solve the following problem:
main routine
Here is the function:
def toc(obj):
    """ main routine

    Emit (to stdout) a Python module containing the keyword lookup table
    and fingerprint set for libinjection.  *obj* is the parsed JSON with
    'keywords' and 'fingerprints' keys.  Returns 0.

    Ported from Python 2 (print statements, dict.iteritems) to Python 3;
    the generated output is unchanged.
    """
    # Static preamble: the lookup() callback libinjection will call.
    print("""
import libinjection
def lookup(state, stype, keyword):
    keyword = keyword.upper()
    if stype == libinjection.LOOKUP_FINGERPRINT:
        if keyword in fingerprints and libinjection.sqli_not_whitelist(state):
            return 'F'
        else:
            return chr(0)
    return words.get(keyword, chr(0))
""")
    # Coerce keys/values to plain str before emitting the table.
    words = {}
    keywords = obj['keywords']
    for k, v in keywords.items():
        words[str(k)] = str(v)
    print('words = {')
    for k in sorted(words.keys()):
        print("'{0}': '{1}',".format(k, words[k]))
    print('}\n')
    keywords = obj['fingerprints']
    print('fingerprints = set([')
    for k in sorted(keywords):
        print("'{0}',".format(k.upper()))
    print('])')
    return 0
18,221 |
The provided code snippet includes necessary dependencies for implementing the `toc` function. Write a Python function `def toc(obj)` to solve the following problem:
main routine
Here is the function:
def toc(obj):
    """ main routine

    Print a PHP module with the keyword lookup table and fingerprint
    array for libinjection; returns 0.
    """
    # Static preamble: the PHP lookup() callback (emitted verbatim).
    print("""<?php
function lookup($state, $stype, $keyword) {
    $keyword = struper(keyword);
    if ($stype == libinjection.LOOKUP_FINGERPRINT) {
        if ($keyword == $fingerprints && libinjection.sqli_not_whitelist($state)) {
            return 'F';
        } else {
            return chr(0);
        }
    }
    return $words.get(keyword, chr(0));
}
""")
    # Keyword map, keys/values coerced to plain str.
    words = {str(k): str(v) for k, v in obj['keywords'].items()}
    print('$words = array(')
    for key in sorted(words):
        print("'{0}' => '{1}',".format(key, words[key]))
    print(');\n')
    # Fingerprints are emitted upper-cased, sorted.
    print('$fingerprints = array(')
    for fp in sorted(obj['fingerprints']):
        print("'{0}',".format(fp.upper()))
    print(');')
    return 0
18,222 | import sys
The provided code snippet includes necessary dependencies for implementing the `toc` function. Write a Python function `def toc(obj)` to solve the following problem:
main routine
Here is the function:
def toc(obj):
    """ main routine

    Emit the C header (libinjection sqli data) to stdout from the parsed
    JSON object *obj*, which must provide 'charmap', 'keywords' and
    'fingerprints'.  Returns 0 on success; exits the process via
    sys.exit(1) if any keyword exceeds the C side's 32-byte buffer.
    """
    # Static preamble: header guard, includes, the keyword_t struct and
    # forward declarations for every per-character parser routine.
    print("""
#ifndef LIBINJECTION_SQLI_DATA_H
#define LIBINJECTION_SQLI_DATA_H
#include "libinjection.h"
#include "libinjection_sqli.h"
typedef struct {
const char *word;
char type;
} keyword_t;
static size_t parse_money(sfilter * sf);
static size_t parse_other(sfilter * sf);
static size_t parse_white(sfilter * sf);
static size_t parse_operator1(sfilter *sf);
static size_t parse_char(sfilter *sf);
static size_t parse_hash(sfilter *sf);
static size_t parse_dash(sfilter *sf);
static size_t parse_slash(sfilter *sf);
static size_t parse_backslash(sfilter * sf);
static size_t parse_operator2(sfilter *sf);
static size_t parse_string(sfilter *sf);
static size_t parse_word(sfilter * sf);
static size_t parse_var(sfilter * sf);
static size_t parse_number(sfilter * sf);
static size_t parse_tick(sfilter * sf);
static size_t parse_ustring(sfilter * sf);
static size_t parse_qstring(sfilter * sf);
static size_t parse_nqstring(sfilter * sf);
static size_t parse_xstring(sfilter * sf);
static size_t parse_bstring(sfilter * sf);
static size_t parse_estring(sfilter * sf);
static size_t parse_bword(sfilter * sf);
""")
    #
    # Mapping of character to function
    #
    # NOTE(review): 'parse_underscore' has no forward declaration above —
    # confirm the charmap never actually emits CHAR_UNDERSCORE.
    fnmap = {
        'CHAR_WORD' : 'parse_word',
        'CHAR_WHITE': 'parse_white',
        'CHAR_OP1' : 'parse_operator1',
        'CHAR_UNARY': 'parse_operator1',
        'CHAR_OP2' : 'parse_operator2',
        'CHAR_BANG' : 'parse_operator2',
        'CHAR_BACK' : 'parse_backslash',
        'CHAR_DASH' : 'parse_dash',
        'CHAR_STR' : 'parse_string',
        'CHAR_HASH' : 'parse_hash',
        'CHAR_NUM' : 'parse_number',
        'CHAR_SLASH': 'parse_slash',
        'CHAR_SEMICOLON' : 'parse_char',
        'CHAR_COMMA': 'parse_char',
        'CHAR_LEFTPARENS': 'parse_char',
        'CHAR_RIGHTPARENS': 'parse_char',
        'CHAR_LEFTBRACE': 'parse_char',
        'CHAR_RIGHTBRACE': 'parse_char',
        'CHAR_VAR' : 'parse_var',
        'CHAR_OTHER': 'parse_other',
        'CHAR_MONEY': 'parse_money',
        'CHAR_TICK' : 'parse_tick',
        'CHAR_UNDERSCORE': 'parse_underscore',
        'CHAR_USTRING' : 'parse_ustring',
        'CHAR_QSTRING' : 'parse_qstring',
        'CHAR_NQSTRING' : 'parse_nqstring',
        'CHAR_XSTRING' : 'parse_xstring',
        'CHAR_BSTRING' : 'parse_bstring',
        'CHAR_ESTRING' : 'parse_estring',
        'CHAR_BWORD' : 'parse_bword'
    }
    # Emit the 256-ish entry dispatch table in charmap order, one function
    # pointer per character class, with the byte index as a comment.
    print()
    print("typedef size_t (*pt2Function)(sfilter *sf);")
    print("static const pt2Function char_parse_map[] = {")
    pos = 0
    for character in obj['charmap']:
        print(" &%s, /* %d */" % (fnmap[character], pos))
        pos += 1
    print("};")
    print()
    # keywords
    # load them
    keywords = obj['keywords']
    # Fingerprints share the keyword table: each is prefixed with '0'
    # (so it cannot collide with a real SQL word) and typed 'F'.
    for fingerprint in list(obj['fingerprints']):
        fingerprint = '0' + fingerprint.upper()
        keywords[fingerprint] = 'F'
    # Upper-case all keys; collect first, then mutate, so the dict is not
    # modified while being iterated.
    needhelp = []
    for key in keywords.keys():
        if key != key.upper():
            needhelp.append(key)
    for key in needhelp:
        tmpv = keywords[key]
        del keywords[key]
        keywords[key.upper()] = tmpv
    print("static const keyword_t sql_keywords[] = {")
    for k in sorted(keywords.keys()):
        # The C side stores each keyword in a fixed 32-byte buffer
        # (31 chars + NUL); abort generation rather than truncate.
        if len(k) > 31:
            sys.stderr.write("ERROR: keyword greater than 32 chars\n")
            sys.exit(1)
        print(" {\"%s\", '%s'}," % (k, keywords[k]))
    print("};")
    print("static const size_t sql_keywords_sz = %d;" % (len(keywords), ))
    print("#endif")
    return 0
18,223 | import subprocess
# Reverse map: libinjection fingerprint character -> one representative SQL
# token that the tokenizer classifies back into that character class
# (e.g. 'f' = function name, 'v' = variable, 'U' = union, 'c' = comment).
RMAP = {
    '1': '1',
    'f': 'convert',
    '&': 'and',
    'v': '@version',
    'n': 'aname',
    's': "\"1\"",
    '(': '(',
    ')': ')',
    'o': '*',
    'E': 'select',
    'U': 'union',
    'k': "JOIN",
    't': 'binary',
    ',': ',',
    ';': ';',
    'c': ' -- comment',
    'T': 'DROP',
    ':': ':',
    'A': 'COLLATE',
    'B': 'group by',
    'X': '/* /* nested comment */ */'
}
The provided code snippet includes necessary dependencies for implementing the `fingerprint_to_sqli` function. Write a Python function `def fingerprint_to_sqli()` to solve the following problem:
main code, expects to be run in main libinjection/src directory and hardwires "fingerprints.txt" as input file
Here is the function:
def fingerprint_to_sqli():
    """
    main code, expects to be run in main libinjection/src directory
    and hardwires "fingerprints.txt" as input file

    For every fingerprint, expands each character into a sample SQL token
    via RMAP.  In 'print' mode the fingerprint and reconstructed SQL are
    echoed; otherwise the SQL is round-tripped through ./fptool and
    mismatching fingerprints are reported.
    """
    # NOTE: mode is hard-coded, so the fptool verification branch below
    # is currently dead code unless this constant is edited.
    mode = 'print'
    fingerprints = []
    with open('fingerprints.txt', 'r') as openfile:
        for line in openfile:
            fingerprints.append(line.strip())
    for fingerprint in fingerprints:
        # Expand fingerprint chars into representative SQL tokens.
        sql = []
        for char in fingerprint:
            sql.append(RMAP[char])
        sqlstr = ' '.join(sql)
        if mode == 'print':
            print(fingerprint, ' '.join(sql))
        else:
            # Re-fingerprint the reconstructed SQL and flag mismatches.
            args = ['./fptool', '-0', sqlstr]
            actualfp = subprocess.check_output(args).strip()
            if fingerprint != actualfp:
                print(fingerprint, actualfp, ' '.join(sql))
18,224 | KEYWORDS = {
'_BIG5': 't',
'_DEC8': 't',
'_CP850': 't',
'_HP8': 't',
'_KOI8R': 't',
'_LATIN1': 't',
'_LATIN2': 't',
'_SWE7': 't',
'_ASCII': 't',
'_UJIS': 't',
'_SJIS': 't',
'_HEBREW': 't',
'_TIS620': 't',
'_EUCKR': 't',
'_KOI8U': 't',
'_GB2312': 't',
'_GREEK': 't',
'_CP1250': 't',
'_GBK': 't',
'_LATIN5': 't',
'_ARMSCII8': 't',
'_UTF8': 't',
'_USC2': 't',
'_CP866': 't',
'_KEYBCS2': 't',
'_MACCE': 't',
'_MACROMAN': 't',
'_CP852': 't',
'_LATIN7': 't',
'_CP1251': 't',
'_CP1257': 't',
'_BINARY': 't',
'_GEOSTD8': 't',
'_CP932': 't',
'_EUCJPMS': 't',
'AUTOINCREMENT' : 'k',
'UTL_INADDR.GET_HOST_ADDRESS': 'f',
'UTL_INADDR.GET_HOST_NAME' : 'f',
'UTL_HTTP.REQUEST' : 'f',
# ORACLE
# http://blog.red-database-security.com/
# /2009/01/17/tutorial-oracle-sql-injection-in-webapps-part-i/print/
'DBMS_PIPE.RECEIVE_MESSAGE': 'f',
'CTXSYS.DRITHSX.SN': 'f',
'SYS.STRAGG': 'f',
'SYS.FN_BUILTIN_PERMISSIONS' : 'f',
'SYS.FN_GET_AUDIT_FILE' : 'f',
'SYS.FN_MY_PERMISSIONS' : 'f',
'SYS.DATABASE_NAME' : 'n',
'ABORT' : 'k',
'ABS' : 'f',
'ACCESSIBLE' : 'k',
'ACOS' : 'f',
'ADDDATE' : 'f',
'ADDTIME' : 'f',
'AES_DECRYPT' : 'f',
'AES_ENCRYPT' : 'f',
'AGAINST' : 'k',
'AGE' : 'f',
'ALTER' : 'k',
# 'ALL_USERS' - oracle
'ALL_USERS' : 'k',
'ANALYZE' : 'k',
'AND' : '&',
# ANY -- acts like a function
# http://dev.mysql.com/doc/refman/5.0/en/any-in-some-subqueries.html
'ANY' : 'f',
# pgsql
'ANYELEMENT' : 't',
'ANYARRAY' : 't',
'ANYNONARRY' : 't',
'CSTRING' : 't',
# array_... pgsql
'ARRAY_AGG' : 'f',
'ARRAY_CAT' : 'f',
'ARRAY_NDIMS' : 'f',
'ARRAY_DIM' : 'f',
'ARRAY_FILL' : 'f',
'ARRAY_LENGTH' : 'f',
'ARRAY_LOWER' : 'f',
'ARRAY_UPPER' : 'f',
'ARRAY_PREPEND' : 'f',
'ARRAY_TO_STRING' : 'f',
'ARRAY_TO_JSON' : 'f',
'APP_NAME' : 'f',
'APPLOCK_MODE' : 'f',
'APPLOCK_TEST' : 'f',
'ASSEMBLYPROPERTY' : 'f',
'AS' : 'k',
'ASC' : 'k',
'ASCII' : 'f',
'ASENSITIVE' : 'k',
'ASIN' : 'f',
'ASYMKEY_ID' : 'f',
'ATAN' : 'f',
'ATAN2' : 'f',
'AVG' : 'f',
'BEFORE' : 'k',
'BEGIN' : 'T',
'BEGIN GOTO' : 'T',
'BEGIN TRY' : 'T',
'BEGIN TRY DECLARE' : 'T',
'BEGIN DECLARE' : 'T',
'BENCHMARK' : 'f',
'BETWEEN' : 'o',
'BIGINT' : 't',
'BIGSERIAL' : 't',
'BIN' : 'f',
# "select binary 1" forward type operator
'BINARY' : 't',
'BINARY_DOUBLE_INFINITY' : '1',
'BINARY_DOUBLE_NAN' : '1',
'BINARY_FLOAT_INFINITY' : '1',
'BINARY_FLOAT_NAN' : '1',
'BINBINARY' : 'f',
'BIT_AND' : 'f',
'BIT_COUNT' : 'f',
'BIT_LENGTH' : 'f',
'BIT_OR' : 'f',
'BIT_XOR' : 'f',
'BLOB' : 'k',
# pgsql
'BOOL_AND' : 'f',
# pgsql
'BOOL_OR' : 'f',
'BOOLEAN' : 't',
'BOTH' : 'k',
# pgsql
'BTRIM' : 'f',
'BY' : 'n',
'BYTEA' : 't',
# MS ACCESS
#
#
'CBOOL' : 'f',
'CBYTE' : 'f',
'CCUR' : 'f',
'CDATE' : 'f',
'CDBL' : 'f',
'CINT' : 'f',
'CLNG' : 'f',
'CSNG' : 'f',
'CVAR' : 'f',
# CHANGES: sqlite3
'CHANGES' : 'f',
'CHDIR' : 'f',
'CHDRIVE' : 'f',
'CURDIR' : 'f',
'FILEDATETIME' : 'f',
'FILELEN' : 'f',
'GETATTR' : 'f',
'MKDIR' : 'f',
'SETATTR' : 'f',
'DAVG' : 'f',
'DCOUNT' : 'f',
'DFIRST' : 'f',
'DLAST' : 'f',
'DLOOKUP' : 'f',
'DMAX' : 'f',
'DMIN' : 'f',
'DSUM' : 'f',
# TBD
'DO' : 'n',
'CALL' : 'T',
'CASCADE' : 'k',
'CASE' : 'E',
'CAST' : 'f',
# pgsql 'cube root' lol
'CBRT' : 'f',
'CEIL' : 'f',
'CEILING' : 'f',
'CERTENCODED' : 'f',
'CERTPRIVATEKEY' : 'f',
'CERT_ID' : 'f',
'CERT_PROPERTY' : 'f',
'CHANGE' : 'k',
# 'CHAR'
# sometimes a function too
# TBD
'CHAR' : 'f',
'CHARACTER' : 't',
'CHARACTER_LENGTH' : 'f',
'CHARINDEX' : 'f',
'CHARSET' : 'f',
'CHAR_LENGTH' : 'f',
# mysql keyword but not clear how used
'CHECK' : 'n',
'CHECKSUM_AGG' : 'f',
'CHOOSE' : 'f',
'CHR' : 'f',
'CLOCK_TIMESTAMP' : 'f',
'COALESCE' : 'f',
'COERCIBILITY' : 'f',
'COL_LENGTH' : 'f',
'COL_NAME' : 'f',
'COLLATE' : 'A',
'COLLATION' : 'f',
'COLLATIONPROPERTY' : 'f',
# TBD
'COLUMN' : 'k',
'COLUMNPROPERTY' : 'f',
'COLUMNS_UPDATED' : 'f',
'COMPRESS' : 'f',
'CONCAT' : 'f',
'CONCAT_WS' : 'f',
'CONDITION' : 'k',
'CONNECTION_ID' : 'f',
'CONSTRAINT' : 'k',
'CONTINUE' : 'k',
'CONV' : 'f',
'CONVERT' : 'f',
# pgsql
'CONVERT_FROM' : 'f',
# pgsql
'CONVERT_TO' : 'f',
'CONVERT_TZ' : 'f',
'COS' : 'f',
'COT' : 'f',
'COUNT' : 'f',
'COUNT_BIG' : 'k',
'CRC32' : 'f',
'CREATE' : 'E',
'CREATE OR' : 'n',
'CREATE OR REPLACE' : 'T',
'CROSS' : 'n',
'CUME_DIST' : 'f',
'CURDATE' : 'f',
'CURRENT_DATABASE' : 'f',
# MYSQL Dual, function or variable-like
# And IBM
# http://publib.boulder.ibm.com/infocenter/iseries/v5r4/index.jsp?topic=%2Fsqlp%2Frbafykeyu.htm
'CURRENT_DATE' : 'v',
'CURRENT_TIME' : 'v',
'CURRENT_TIMESTAMP' : 'v',
'CURRENT_QUERY' : 'f',
'CURRENT_SCHEMA' : 'f',
'CURRENT_SCHEMAS' : 'f',
'CURRENT_SETTING' : 'f',
# current_user sometimes acts like a variable
# other times it acts like a function depending
# on database. This is converted in the right
# type in the folding code
# mysql = function
# ?? = variable
'CURRENT_USER' : 'v',
'CURRENTUSER' : 'f',
#
# DB2 'Special Registers'
# These act like variables
# http://publib.boulder.ibm.com/infocenter/iseries/v5r4/index.jsp?topic=%2Fsqlp%2Frbafykeyu.htm
'CURRENT DATE' : 'v',
'CURRENT DEGREE' : 'v',
'CURRENT_PATH' : 'v',
'CURRENT PATH' : 'v',
'CURRENT FUNCTION' : 'v',
'CURRENT SCHEMA' : 'v',
'CURRENT_SERVER' : 'v',
'CURRENT SERVER' : 'v',
'CURRENT TIME' : 'v',
'CURRENT_TIMEZONE' : 'v',
'CURRENT TIMEZONE' : 'v',
'CURRENT FUNCTION PATH': 'v',
# pgsql
'CURRVAL' : 'f',
'CURSOR' : 'k',
'CURSOR_STATUS' : 'f',
'CURTIME' : 'f',
# this might be a function
'DATABASE' : 'n',
'DATABASE_PRINCIPAL_ID' : 'f',
'DATABASEPROPERTYEX' : 'f',
'DATABASES' : 'k',
'DATALENGTH' : 'f',
'DATE' : 'f',
'DATEDIFF' : 'f',
# sqlserver
'DATENAME' : 'f',
#sqlserver
'DATEPART' : 'f',
'DATEADD' : 'f',
'DATESERIAL' : 'f',
'DATEVALUE' : 'f',
'DATEFROMPARTS' : 'f',
'DATETIME2FROMPARTS' : 'f',
'DATETIMEFROMPARTS' : 'f',
# sqlserver
'DATETIMEOFFSETFROMPARTS' : 'f',
'DATE_ADD' : 'f',
'DATE_FORMAT' : 'f',
'DATE_PART' : 'f',
'DATE_SUB' : 'f',
'DATE_TRUNC' : 'f',
'DAY' : 'f',
'DAYNAME' : 'f',
'DAYOFMONTH' : 'f',
'DAYOFWEEK' : 'f',
'DAYOFYEAR' : 'f',
'DAY_HOUR' : 'k',
'DAY_MICROSECOND' : 'k',
'DAY_MINUTE' : 'k',
'DAY_SECOND' : 'k',
'DB_ID' : 'f',
'DB_NAME' : 'f',
'DEC' : 'k',
'DECIMAL' : 't',
# can only be used after a ';'
'DECLARE' : 'T',
'DECODE' : 'f',
'DECRYPTBYASMKEY' : 'f',
'DECRYPTBYCERT' : 'f',
'DECRYPTBYKEY' : 'f',
'DECRYPTBYKEYAUTOCERT' : 'f',
'DECRYPTBYPASSPHRASE' : 'f',
'DEFAULT' : 'k',
'DEGREES' : 'f',
'DELAY' : 'k',
'DELAYED' : 'k',
'DELETE' : 'T',
'DENSE_RANK' : 'f',
'DESC' : 'k',
'DESCRIBE' : 'k',
'DES_DECRYPT' : 'f',
'DES_ENCRYPT' : 'f',
'DETERMINISTIC' : 'k',
'DIFFERENCE' : 'f',
'DISTINCTROW' : 'k',
'DISTINCT' : 'k',
'DIV' : 'o',
'DOUBLE' : 't',
'DROP' : 'T',
'DUAL' : 'n',
'EACH' : 'k',
'ELSE' : 'k',
'ELSEIF' : 'k',
'ELT' : 'f',
'ENCLOSED' : 'k',
'ENCODE' : 'f',
'ENCRYPT' : 'f',
'ENCRYPTBYASMKEY' : 'f',
'ENCRYPTBYCERT' : 'f',
'ENCRYPTBYKEY' : 'f',
'ENCRYPTBYPASSPHRASE' : 'f',
#
# sqlserver
'EOMONTH' : 'f',
# pgsql
'ENUM_FIRST' : 'f',
'ENUM_LAST' : 'f',
'ENUM_RANGE' : 'f',
# special MS-ACCESS operator
# http://office.microsoft.com/en-001/access-help/table-of-operators-HA010235862.aspx
'EQV' : 'o',
'ESCAPED' : 'k',
# DB2, others..
# http://publib.boulder.ibm.com/infocenter/iseries/v5r4/index.jsp?topic=%2Fsqlp%2Frbafykeyu.htm
'EXCEPT' : 'U',
# TBD
#'END' : 'k',
# 'EXEC', 'EXECUTE' - MSSQL
# http://msdn.microsoft.com/en-us/library/ms175046.aspx
'EXEC' : 'T',
'EXECUTE' : 'T',
'EXISTS' : 'f',
'EXIT' : 'k',
'EXP' : 'f',
'EXPLAIN' : 'k',
'EXPORT_SET' : 'f',
'EXTRACT' : 'f',
'EXTRACTVALUE' : 'f',
'EXTRACT_VALUE' : 'f',
'EVENTDATA' : 'f',
'FALSE' : '1',
'FETCH' : 'k',
'FIELD' : 'f',
'FILE_ID' : 'f',
'FILE_IDEX' : 'f',
'FILE_NAME' : 'f',
'FILEGROUP_ID' : 'f',
'FILEGROUP_NAME' : 'f',
'FILEGROUPPROPERTY' : 'f',
'FILEPROPERTY' : 'f',
# http://www-01.ibm.com/support/knowledgecenter/#!/SSGU8G_11.50.0/com.ibm.sqls.doc/ids_sqs_1526.htm
'FILETOBLOB' : 'f',
'FILETOCLOB' : 'f',
'FIND_IN_SET' : 'f',
'FIRST_VALUE' : 'f',
'FLOAT' : 't',
'FLOAT4' : 't',
'FLOAT8' : 't',
'FLOOR' : 'f',
'FN_VIRTUALFILESTATS' : 'f',
'FORCE' : 'k',
'FOREIGN' : 'k',
'FOR' : 'n',
'FORMAT' : 'f',
'FOUND_ROWS' : 'f',
'FROM' : 'k',
# MySQL 5.6
'FROM_BASE64' : 'f',
'FROM_DAYS' : 'f',
'FROM_UNIXTIME' : 'f',
'FUNCTION' : 'k',
'FULLTEXT' : 'k',
'FULLTEXTCATALOGPROPERTY' : 'f',
'FULLTEXTSERVICEPROPERTY' : 'f',
# pgsql
'GENERATE_SERIES' : 'f',
# pgsql
'GENERATE_SUBSCRIPTS' : 'f',
# sqlserver
'GETDATE' : 'f',
# sqlserver
'GETUTCDATE' : 'f',
# pgsql
'GET_BIT' : 'f',
# pgsql
'GET_BYTE' : 'f',
'GET_FORMAT' : 'f',
'GET_LOCK' : 'f',
'GO' : 'T',
'GOTO' : 'T',
'GRANT' : 'k',
'GREATEST' : 'f',
'GROUP' : 'n',
'GROUPING' : 'f',
'GROUPING_ID' : 'f',
'GROUP_CONCAT' : 'f',
# MYSQL http://dev.mysql.com/doc/refman/5.6/en/handler.html
'HANDLER' : 'T',
'HAS_PERMS_BY_NAME' : 'f',
'HASHBYTES' : 'f',
#
# 'HAVING' - MSSQL
'HAVING' : 'B',
'HEX' : 'f',
'HIGH_PRIORITY' : 'k',
'HOUR' : 'f',
'HOUR_MICROSECOND' : 'k',
'HOUR_MINUTE' : 'k',
'HOUR_SECOND' : 'k',
# 'HOST_NAME' -- transact-sql
'HOST_NAME' : 'f',
'IDENT_CURRENT' : 'f',
'IDENT_INCR' : 'f',
'IDENT_SEED' : 'f',
'IDENTIFY' : 'f',
# 'IF - if is normally a function, except in TSQL
# http://msdn.microsoft.com/en-us/library/ms182717.aspx
'IF' : 'f',
'IF EXISTS' : 'f',
'IF NOT' : 'f',
'IF NOT EXISTS' : 'f',
'IFF' : 'f',
'IFNULL' : 'f',
'IGNORE' : 'k',
'IIF' : 'f',
# IN is a special case.. sometimes a function, sometimes a keyword
# corrected inside the folding code
'IN' : 'k',
'INDEX' : 'k',
'INDEX_COL' : 'f',
'INDEXKEY_PROPERTY' : 'f',
'INDEXPROPERTY' : 'f',
'INET_ATON' : 'f',
'INET_NTOA' : 'f',
'INFILE' : 'k',
# pgsql
'INITCAP' : 'f',
'INNER' : 'k',
'INOUT' : 'k',
'INSENSITIVE' : 'k',
'INSERT' : 'E',
'INSERT INTO' : 'T',
'INSERT IGNORE' : 'E',
'INSERT LOW_PRIORITY INTO' : 'T',
'INSERT LOW_PRIORITY' : 'E',
'INSERT DELAYED INTO' : 'T',
'INSERT DELAYED' : 'E',
'INSERT HIGH_PRIORITY INTO' : 'T',
'INSERT HIGH_PRIORITY' : 'E',
'INSERT IGNORE INTO' : 'T',
'INSTR' : 'f',
'INSTRREV' : 'f',
'INT' : 't',
'INT1' : 't',
'INT2' : 't',
'INT3' : 't',
'INT4' : 't',
'INT8' : 't',
'INTEGER' : 't',
# INTERSECT - IBM DB2, others
# http://publib.boulder.ibm.com/infocenter/iseries/v5r4/index.jsp?topic=%2Fsqlp%2Frbafykeyu.htm
'INTERSECT' : 'U',
'INTERVAL' : 'k',
'INTO' : 'k',
'IS' : 'o',
#sqlserver
'ISDATE' : 'f',
'ISEMPTY' : 'f',
# pgsql
'ISFINITE' : 'f',
'ISNULL' : 'f',
'ISNUMERIC' : 'f',
'IS_FREE_LOCK' : 'f',
#
# 'IS_MEMBER' - MSSQL
'IS_MEMBER' : 'f',
'IS_ROLEMEMBER' : 'f',
'IS_OBJECTSIGNED' : 'f',
# 'IS_SRV...' MSSQL
'IS_SRVROLEMEMBER' : 'f',
'IS_USED_LOCK' : 'f',
'ITERATE' : 'k',
'JOIN' : 'k',
'JSON_KEYS' : 'f',
'JULIANDAY' : 'f',
# pgsql
'JUSTIFY_DAYS' : 'f',
'JUSTIFY_HOURS' : 'f',
'JUSTIFY_INTERVAL' : 'f',
'KEY_ID' : 'f',
'KEY_GUID' : 'f',
'KEYS' : 'k',
'KILL' : 'k',
'LAG' : 'f',
'LAST_INSERT_ID' : 'f',
'LAST_INSERT_ROWID' : 'f',
'LAST_VALUE' : 'f',
'LASTVAL' : 'f',
'LCASE' : 'f',
'LEAD' : 'f',
'LEADING' : 'k',
'LEAST' : 'f',
'LEAVE' : 'k',
'LEFT' : 'f',
'LENGTH' : 'f',
'LIKE' : 'o',
'LIMIT' : 'B',
'LINEAR' : 'k',
'LINES' : 'k',
'LN' : 'f',
'LOAD' : 'k',
'LOAD_EXTENSION' : 'f',
'LOAD_FILE' : 'f',
# MYSQL http://dev.mysql.com/doc/refman/5.6/en/load-data.html
'LOAD DATA' : 'T',
'LOAD XML' : 'T',
# MYSQL function vs. variable
'LOCALTIME' : 'v',
'LOCALTIMESTAMP' : 'v',
'LOCATE' : 'f',
'LOCK' : 'n',
'LOG' : 'f',
'LOG10' : 'f',
'LOG2' : 'f',
'LONGBLOB' : 'k',
'LONGTEXT' : 'k',
'LOOP' : 'k',
'LOWER' : 'f',
'LOWER_INC' : 'f',
'LOWER_INF' : 'f',
'LOW_PRIORITY' : 'k',
'LPAD' : 'f',
'LTRIM' : 'f',
'MAKEDATE' : 'f',
'MAKE_SET' : 'f',
'MASKLEN' : 'f',
'MASTER_BIND' : 'k',
'MASTER_POS_WAIT' : 'f',
'MASTER_SSL_VERIFY_SERVER_CERT': 'k',
'MATCH' : 'k',
'MAX' : 'f',
'MAXVALUE' : 'k',
'MD5' : 'f',
'MEDIUMBLOB' : 'k',
'MEDIUMINT' : 'k',
'MEDIUMTEXT' : 'k',
'MERGE' : 'k',
'MICROSECOND' : 'f',
'MID' : 'f',
'MIDDLEINT' : 'k',
'MIN' : 'f',
'MINUTE' : 'f',
'MINUTE_MICROSECOND' : 'k',
'MINUTE_SECOND' : 'k',
'MOD' : 'o',
'MODE' : 'n',
'MODIFIES' : 'k',
'MONEY' : 't',
'MONTH' : 'f',
'MONTHNAME' : 'f',
'NAME_CONST' : 'f',
'NATURAL' : 'n',
'NETMASK' : 'f',
'NEXTVAL' : 'f',
'NOT' : 'o', # UNARY OPERATOR
'NOTNULL' : 'k',
'NOW' : 'f',
# oracle http://www.shift-the-oracle.com/sql/select-for-update.html
'NOWAIT' : 'k',
'NO_WRITE_TO_BINLOG' : 'k',
'NTH_VALUE' : 'f',
'NTILE' : 'f',
# NULL is treated as "variable" type
# Sure it's a keyword, but it's really more
# like a number or value.
# but we don't want it folded away
# since it's a good indicator of SQL
# ('true' and 'false' are also similar)
'NULL' : 'v',
# unknown is mysql keyword, again treat
# as 'v' type
'UNKNOWN' : 'v',
'NULLIF' : 'f',
'NUMERIC' : 't',
# MSACCESS
'NZ' : 'f',
'OBJECT_DEFINITION' : 'f',
'OBJECT_ID' : 'f',
'OBJECT_NAME' : 'f',
'OBJECT_SCHEMA_NAME' : 'f',
'OBJECTPROPERTY' : 'f',
'OBJECTPROPERTYEX' : 'f',
'OCT' : 'f',
'OCTET_LENGTH' : 'f',
'OFFSET' : 'k',
'OID' : 't',
'OLD_PASSWORD' : 'f',
# need to investigate how used
#'ON' : 'k',
'ONE_SHOT' : 'k',
# obviously not SQL but used in attacks
'OWN3D' : 'k',
# 'OPEN'
# http://msdn.microsoft.com/en-us/library/ms190500.aspx
'OPEN' : 'k',
# 'OPENDATASOURCE'
# http://msdn.microsoft.com/en-us/library/ms179856.aspx
'OPENDATASOURCE' : 'f',
'OPENXML' : 'f',
'OPENQUERY' : 'f',
'OPENROWSET' : 'f',
'OPTIMIZE' : 'k',
'OPTION' : 'k',
'OPTIONALLY' : 'k',
'OR' : '&',
'ORD' : 'f',
'ORDER' : 'n',
'ORIGINAL_DB_NAME' : 'f',
'ORIGINAL_LOGIN' : 'f',
# is a mysql reserved word but not really used
'OUT' : 'n',
'OUTER' : 'n',
'OUTFILE' : 'k',
# unusual PGSQL operator that looks like a function
'OVERLAPS' : 'f',
# pgsql
'OVERLAY' : 'f',
'PARSENAME' : 'f',
'PARTITION' : 'k',
# 'PARTITION BY' IBM DB2
# http://publib.boulder.ibm.com/infocenter/iseries/v5r4/index.jsp?topic=%2Fsqlp%2Frbafykeyu.htm
'PARTITION BY' : 'B',
# keyword "SET PASSWORD", and a function
'PASSWORD' : 'n',
'PATINDEX' : 'f',
'PATHINDEX' : 'f',
'PERCENT_RANK' : 'f',
'PERCENTILE_COUNT' : 'f',
'PERCENTILE_DISC' : 'f',
'PERCENTILE_RANK' : 'f',
'PERIOD_ADD' : 'f',
'PERIOD_DIFF' : 'f',
'PERMISSIONS' : 'f',
'PG_ADVISORY_LOCK' : 'f',
'PG_BACKEND_PID' : 'f',
'PG_CANCEL_BACKEND' : 'f',
'PG_CREATE_RESTORE_POINT' : 'f',
'PG_RELOAD_CONF' : 'f',
'PG_CLIENT_ENCODING' : 'f',
'PG_CONF_LOAD_TIME' : 'f',
'PG_LISTENING_CHANNELS' : 'f',
'PG_HAS_ROLE' : 'f',
'PG_IS_IN_RECOVERY' : 'f',
'PG_IS_OTHER_TEMP_SCHEMA' : 'f',
'PG_LS_DIR' : 'f',
'PG_MY_TEMP_SCHEMA' : 'f',
'PG_POSTMASTER_START_TIME' : 'f',
'PG_READ_FILE' : 'f',
'PG_READ_BINARY_FILE' : 'f',
'PG_ROTATE_LOGFILE' : 'f',
'PG_STAT_FILE' : 'f',
'PG_SLEEP' : 'f',
'PG_START_BACKUP' : 'f',
'PG_STOP_BACKUP' : 'f',
'PG_SWITCH_XLOG' : 'f',
'PG_TERMINATE_BACKEND' : 'f',
'PG_TRIGGER_DEPTH' : 'f',
'PI' : 'f',
'POSITION' : 'f',
'POW' : 'f',
'POWER' : 'f',
'PRECISION' : 'k',
# http://msdn.microsoft.com/en-us/library/ms176047.aspx
'PRINT' : 'T',
'PRIMARY' : 'k',
'PROCEDURE' : 'k',
'PROCEDURE ANALYSE' : 'f',
'PUBLISHINGSERVERNAME' : 'f',
'PURGE' : 'k',
'PWDCOMPARE' : 'f',
'PWDENCRYPT' : 'f',
'QUARTER' : 'f',
'QUOTE' : 'f',
# pgsql
'QUOTE_IDENT' : 'f',
'QUOTENAME' : 'f',
# pgsql
'QUOTE_LITERAL' : 'f',
# pgsql
'QUOTE_NULLABLE' : 'f',
'RADIANS' : 'f',
'RAND' : 'f',
'RANDOM' : 'f',
# http://msdn.microsoft.com/en-us/library/ms178592.aspx
'RAISEERROR' : 'E',
# 'RANDOMBLOB' - sqlite3
'RANDOMBLOB' : 'f',
'RANGE' : 'k',
'RANK' : 'f',
'READ' : 'k',
'READS' : 'k',
'READ_WRITE' : 'k',
# 'REAL' only used in data definition
'REAL' : 't',
'REFERENCES' : 'k',
# pgsql, mariadb
'REGEXP' : 'o',
'REGEXP_INSTR' : 'f',
'REGEXP_REPLACE' : 'f',
'REGEXP_MATCHES' : 'f',
'REGEXP_SUBSTR' : 'f',
'REGEXP_SPLIT_TO_TABLE' : 'f',
'REGEXP_SPLIT_TO_ARRAY' : 'f',
'REGPROC' : 't',
'REGPROCEDURE' : 't',
'REGOPER' : 't',
'REGOPERATOR' : 't',
'REGCLASS' : 't',
'REGTYPE' : 't',
'REGCONFIG' : 't',
'REGDICTIONARY' : 't',
'RELEASE' : 'k',
'RELEASE_LOCK' : 'f',
'RENAME' : 'k',
'REPEAT' : 'k',
# keyword and function
'REPLACE' : 'k',
'REPLICATE' : 'f',
'REQUIRE' : 'k',
'RESIGNAL' : 'k',
'RESTRICT' : 'k',
'RETURN' : 'k',
'REVERSE' : 'f',
'REVOKE' : 'k',
# RIGHT JOIN vs. RIGHT()
# tricky since it's a function in pgsql
# needs review
'RIGHT' : 'n',
'RLIKE' : 'o',
'ROUND' : 'f',
'ROW' : 'f',
'ROW_COUNT' : 'f',
'ROW_NUMBER' : 'f',
'ROW_TO_JSON' : 'f',
'RPAD' : 'f',
'RTRIM' : 'f',
'SCHEMA' : 'k',
'SCHEMA_ID' : 'f',
'SCHAMA_NAME' : 'f',
'SCHEMAS' : 'k',
'SCOPE_IDENTITY' : 'f',
'SECOND_MICROSECOND' : 'k',
'SEC_TO_TIME' : 'f',
'SELECT' : 'E',
'SENSITIVE' : 'k',
'SEPARATOR' : 'k',
'SERIAL' : 't',
'SERIAL2' : 't',
'SERIAL4' : 't',
'SERIAL8' : 't',
'SERVERPROPERTY' : 'f',
'SESSION_USER' : 'f',
'SET' : 'E',
'SETSEED' : 'f',
'SETVAL' : 'f',
'SET_BIT' : 'f',
'SET_BYTE' : 'f',
'SET_CONFIG' : 'f',
'SET_MASKLEN' : 'f',
'SHA' : 'f',
'SHA1' : 'f',
'SHA2' : 'f',
'SHOW' : 'n',
'SHUTDOWN' : 'T',
'SIGN' : 'f',
'SIGNBYASMKEY' : 'f',
'SIGNBYCERT' : 'f',
'SIGNAL' : 'k',
'SIMILAR' : 'k',
'SIN' : 'f',
'SLEEP' : 'f',
#
# sqlserver
'SMALLDATETIMEFROMPARTS' : 'f',
'SMALLINT' : 't',
'SMALLSERIAL' : 't',
# SOME -- acts like a function
# http://dev.mysql.com/doc/refman/5.0/en/any-in-some-subqueries.html
'SOME' : 'f',
'SOUNDEX' : 'f',
'SOUNDS' : 'o',
'SPACE' : 'f',
'SPATIAL' : 'k',
'SPECIFIC' : 'k',
'SPLIT_PART' : 'f',
'SQL' : 'k',
'SQLEXCEPTION' : 'k',
'SQLSTATE' : 'k',
'SQLWARNING' : 'k',
'SQL_BIG_RESULT' : 'k',
'SQL_BUFFER_RESULT' : 'k',
'SQL_CACHE' : 'k',
'SQL_CALC_FOUND_ROWS' : 'k',
'SQL_NO_CACHE' : 'k',
'SQL_SMALL_RESULT' : 'k',
'SQL_VARIANT_PROPERTY' : 'f',
'SQLITE_VERSION' : 'f',
'SQRT' : 'f',
'SSL' : 'k',
'STARTING' : 'k',
#pgsql
'STATEMENT_TIMESTAMP' : 'f',
'STATS_DATE' : 'f',
'STDDEV' : 'f',
'STDDEV_POP' : 'f',
'STDDEV_SAMP' : 'f',
'STRAIGHT_JOIN' : 'k',
'STRCMP' : 'f',
# STRCOMP: MS ACCESS
'STRCOMP' : 'f',
'STRCONV' : 'f',
# pgsql
'STRING_AGG' : 'f',
'STRING_TO_ARRAY' : 'f',
'STRPOS' : 'f',
'STR_TO_DATE' : 'f',
'STUFF' : 'f',
'SUBDATE' : 'f',
'SUBSTR' : 'f',
'SUBSTRING' : 'f',
'SUBSTRING_INDEX' : 'f',
'SUBTIME' : 'f',
'SUM' : 'f',
'SUSER_ID' : 'f',
'SUSER_SID' : 'f',
'SUSER_SNAME' : 'f',
'SUSER_NAME' : 'f',
'SYSDATE' : 'f',
# sql server
'SYSDATETIME' : 'f',
# sql server
'SYSDATETIMEOFFSET' : 'f',
# 'SYSCOLUMNS'
# http://msdn.microsoft.com/en-us/library/aa26039s8(v=sql.80).aspx
'SYSCOLUMNS' : 'k',
# 'SYSOBJECTS'
# http://msdn.microsoft.com/en-us/library/aa260447(v=sql.80).aspx
'SYSOBJECTS' : 'k',
# 'SYSUSERS' - MSSQL
# TBD
'SYSUSERS' : 'k',
# sqlserver
'SYSUTCDATETME' : 'f',
'SYSTEM_USER' : 'f',
'SWITCHOFFET' : 'f',
# 'TABLE'
# because SQLi really can't use 'TABLE'
# change from keyword to none
'TABLE' : 'n',
'TAN' : 'f',
'TERMINATED' : 'k',
'TERTIARY_WEIGHTS' : 'f',
'TEXT' : 't',
# TEXTPOS PGSQL 6.0
# remnamed to strpos in 7.0
# http://www.postgresql.org/message-id/20000601091055.A20245@rice.edu
'TEXTPOS' : 'f',
'TEXTPTR' : 'f',
'TEXTVALID' : 'f',
'THEN' : 'k',
# TBD
'TIME' : 'k',
'TIMEDIFF' : 'f',
'TIMEFROMPARTS' : 'f',
# pgsql
'TIMEOFDAY' : 'f',
# ms access
'TIMESERIAL' : 'f',
'TIMEVALUE' : 'f',
'TIMESTAMP' : 't',
'TIMESTAMPADD' : 'f',
'TIME_FORMAT' : 'f',
'TIME_TO_SEC' : 'f',
'TINYBLOB' : 'k',
'TINYINT' : 'k',
'TINYTEXT' : 'k',
#
# sqlserver
'TODATETIMEOFFSET' : 'f',
# pgsql
'TO_ASCII' : 'f',
# MySQL 5.6
'TO_BASE64' : 'f',
# 'TO_CHAR' -- oracle, pgsql
'TO_CHAR' : 'f',
# pgsql
'TO_HEX' : 'f',
'TO_DAYS' : 'f',
'TO_DATE' : 'f',
'TO_NUMBER' : 'f',
'TO_SECONDS' : 'f',
'TO_TIMESTAMP' : 'f',
# sqlite3
'TOTAL' : 'f',
'TOTAL_CHANGES' : 'f',
'TOP' : 'k',
# 'TRAILING' -- only used in TRIM(TRAILING
# http://www.w3resource.com/sql/character-functions/trim.php
'TRAILING' : 'n',
# pgsql
'TRANSACTION_TIMESTAMP' : 'f',
'TRANSLATE' : 'f',
'TRIGGER' : 'k',
'TRIGGER_NESTLEVEL' : 'f',
'TRIM' : 'f',
'TRUE' : '1',
'TRUNC' : 'f',
'TRUNCATE' : 'f',
# sqlserver
'TRY' : 'T',
'TRY_CAST' : 'f',
'TRY_CONVERT' : 'f',
'TRY_PARSE' : 'f',
'TYPE_ID' : 'f',
'TYPE_NAME' : 'f',
'TYPEOF' : 'f',
'TYPEPROPERTY' : 'f',
'UCASE' : 'f',
# pgsql -- used in weird unicode string
# it's an operator so its' gets folded away
'UESCAPE' : 'o',
'UNCOMPRESS' : 'f',
'UNCOMPRESS_LENGTH' : 'f',
'UNDO' : 'k',
'UNHEX' : 'f',
'UNICODE' : 'f',
'UNION' : 'U',
# 'UNI_ON' -- odd variation that comes up
'UNI_ON' : 'U',
# 'UNIQUE'
# only used as a function (DB2) or as "CREATE UNIQUE"
'UNIQUE' : 'n',
'UNIX_TIMESTAMP' : 'f',
'UNLOCK' : 'k',
'UNNEST' : 'f',
'UNSIGNED' : 'k',
'UPDATE' : 'E',
'UPDATEXML' : 'f',
'UPPER' : 'f',
'UPPER_INC' : 'f',
'UPPER_INF' : 'f',
'USAGE' : 'k',
'USE' : 'T',
# transact-sql function
# however treating as a 'none' type
# since 'user_id' is such a common column name
# TBD
'USER_ID' : 'n',
'USER_NAME' : 'n',
# 'USER' -- a MySQL function
# handled in folding step
'USER' : 'n',
'USING' : 'f',
# next 3 TBD
'UTC_DATE' : 'k',
'UTC_TIME' : 'k',
'UTC_TIMESTAMP' : 'k',
'UUID' : 'f',
'UUID_SHORT' : 'f',
'VALUES' : 'k',
'VARBINARY' : 'k',
'VARCHAR' : 't',
'VARCHARACTER' : 'k',
'VARIANCE' : 'f',
'VAR' : 'f',
'VARP' : 'f',
'VARYING' : 'k',
'VAR_POP' : 'f',
'VAR_SAMP' : 'f',
'VERIFYSIGNEDBYASMKEY' : 'f',
'VERIFYSIGNEDBYCERT' : 'f',
'VERSION' : 'f',
'VOID' : 't',
# oracle http://www.shift-the-oracle.com/sql/select-for-update.html
'WAIT' : 'k',
'WAITFOR' : 'n',
'WEEK' : 'f',
'WEEKDAY' : 'f',
'WEEKDAYNAME' : 'f',
'WEEKOFYEAR' : 'f',
'WHEN' : 'k',
'WHERE' : 'k',
'WHILE' : 'T',
# pgsql
'WIDTH_BUCKET' : 'f',
# it's a keyword, but it's too ordinary in English
'WITH' : 'n',
# XML... oracle, pgsql
'XMLAGG' : 'f',
'XMLELEMENT' : 'f',
'XMLCOMMENT' : 'f',
'XMLCONCAT' : 'f',
'XMLFOREST' : 'f',
'XMLFORMAT' : 'f',
'XMLTYPE' : 'f',
'XMLPI' : 'f',
'XMLROOT' : 'f',
'XMLEXISTS' : 'f',
'XML_IS_WELL_FORMED' : 'f',
'XPATH' : 'f',
'XPATH_EXISTS' : 'f',
'XOR' : '&',
'XP_EXECRESULTSET' : 'k',
'YEAR' : 'f',
'YEARWEEK' : 'f',
'YEAR_MONTH' : 'k',
'ZEROBLOB' : 'f',
'ZEROFILL' : 'k',
'DBMS_LOCK.SLEEP' : 'f',
'DBMS_UTILITY.SQLID_TO_SQLHASH': 'f',
'USER_LOCK.SLEEP' : 'f',
#
'!=': 'o', # oracle
'||': '&',
'&&': '&',
'>=': 'o',
'>>': 'o',
'<=': 'o',
'<>': 'o',
':=': 'o',
'::': 'o',
'<<': 'o',
'!<': 'o', # http://msdn.microsoft.com/en-us/library/ms188074.aspx
'!>': 'o', # http://msdn.microsoft.com/en-us/library/ms188074.aspx
'+=': 'o',
'-=': 'o',
'*=': 'o',
'/=': 'o',
'%=': 'o',
'|=': 'o',
'&=': 'o',
'^=': 'o',
'|/': 'o', # http://www.postgresql.org/docs/9.1/static/functions-math.html
'!!': 'o', # http://www.postgresql.org/docs/9.1/static/functions-math.html
'~*': 'o', # http://www.postgresql.org/docs/9.1/static/functions-matching.html
# problematic since ! and ~ are both unary operators in other db engines
# converting to one unary operator is probably ok
# '!~', # http://www.postgresql.org/docs/9.1/static/functions-matching.html
'@>': 'o',
'<@': 'o',
# '!~*'
# pgsql "AT TIME ZONE"
'AT TIME' : 'n',
'AT TIME ZONE' : 'k',
'IN BOOLEAN' : 'n',
'IN BOOLEAN MODE' : 'k',
# IS DISTINCT - IBM DB2
# http://publib.boulder.ibm.com/infocenter/iseries/v5r4/index.jsp?topic=%2Fsqlp%2Frbafykeyu.htm
'IS DISTINCT FROM' : 'o',
'IS DISTINCT' : 'n',
'IS NOT DISTINCT FROM' : 'o',
'IS NOT DISTINCT' : 'n',
'CROSS JOIN' : 'k',
'INNER JOIN' : 'k',
'ALTER DOMAIN' : 'k',
'ALTER TABLE' : 'k',
'GROUP BY' : 'B',
'ORDER BY' : 'B',
'OWN3D BY' : 'B',
'READ WRITE' : 'k',
# 'LOCAL TABLE' pgsql/oracle
# http://www.postgresql.org/docs/current/static/sql-lock.html
'LOCK TABLE' : 'k',
# 'LOCK TABLES' MYSQL
# http://dev.mysql.com/doc/refman/4.1/en/lock-tables.html
'LOCK TABLES' : 'k',
'LEFT OUTER' : 'k',
'LEFT OUTER JOIN' : 'k',
'LEFT JOIN' : 'k',
'RIGHT OUTER' : 'k',
'RIGHT OUTER JOIN' : 'k',
'RIGHT JOIN' : 'k',
# http://technet.microsoft.com/en-us/library/ms187518(v=sql.105).aspx
'FULL JOIN' : 'k',
'FULL OUTER' : 'k',
'FULL OUTER JOIN' : 'k',
'NATURAL JOIN' : 'k',
'NATURAL INNER' : 'k',
'NATURAL OUTER' : 'k',
'NATURAL LEFT' : 'k',
'NATURAL LEFT OUTER': 'k',
'NATURAL LEFT OUTER JOIN': 'k',
'NATURAL RIGHT OUTER JOIN': 'k',
'NATURAL FULL OUTER JOIN': 'k',
'NATURAL RIGHT' : 'k',
'NATURAL FULL' : 'k',
'SOUNDS LIKE' : 'o',
'IS NOT' : 'o',
# IBM DB2
# http://publib.boulder.ibm.com/infocenter/iseries/v5r4/index.jsp?topic=%2Fsqlp%2Frbafykeyu.htm
'NEXT VALUE' : 'n',
'NEXT VALUE FOR' : 'k',
'PREVIOUS VALUE' : 'n',
'PREVIOUS VALUE FOR' : 'k',
'NOT LIKE' : 'o',
'NOT BETWEEN' : 'o',
'NOT SIMILAR' : 'o',
# 'NOT RLIKE' -- MySQL
'NOT RLIKE' : 'o',
'NOT REGEXP' : 'o',
'NOT IN' : 'k',
'SIMILAR TO' : 'o',
'NOT SIMILAR TO' : 'o',
'SELECT DISTINCT' : 'E',
'UNION ALL' : 'U',
'UNION DISTINCT' : 'U',
'UNION DISTINCT ALL' : 'U',
'UNION ALL DISTINCT' : 'U',
# INTO..
# http://dev.mysql.com/doc/refman/5.0/en/select.html
'INTO OUTFILE' : 'k',
'INTO DUMPFILE' : 'k',
'WAITFOR DELAY' : 'E',
'WAITFOR TIME' : 'E',
'WAITFOR RECEIVE' : 'E',
'WITH ROLLUP' : 'k',
# 'INTERSECT ALL' -- ORACLE
'INTERSECT ALL' : 'U',
# hacker mistake
'SELECT ALL' : 'E',
# types
'DOUBLE PRECISION': 't',
'CHARACTER VARYING': 't',
# MYSQL
# http://dev.mysql.com/doc/refman/5.1/en/innodb-locking-reads.html
'LOCK IN': 'n',
'LOCK IN SHARE': 'n',
'LOCK IN SHARE MODE': 'k',
# MYSQL
# http://dev.mysql.com/doc/refman/5.1/en/innodb-locking-reads.html
'FOR UPDATE': 'k',
# TSQL (MS)
# http://msdn.microsoft.com/en-us/library/ms175046.aspx
'EXECUTE AS': 'E',
'EXECUTE AS LOGIN': 'E',
# ORACLE
# http://www.shift-the-oracle.com/sql/select-for-update.html
'FOR UPDATE OF': 'k',
'FOR UPDATE WAIT': 'k',
'FOR UPDATE NOWAIT': 'k',
'FOR UPDATE SKIP': 'k',
'FOR UPDATE SKIP LOCKED': 'k'
}
CHARMAP = [
'CHAR_WHITE', # 0
'CHAR_WHITE', # 1
'CHAR_WHITE', # 2
'CHAR_WHITE', # 3
'CHAR_WHITE', # 4
'CHAR_WHITE', # 5
'CHAR_WHITE', # 6
'CHAR_WHITE', # 7
'CHAR_WHITE', # 8
'CHAR_WHITE', # 9
'CHAR_WHITE', # 10
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE', # 20
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE',
'CHAR_WHITE', # 30
'CHAR_WHITE', # 31
'CHAR_WHITE', # 32
'CHAR_BANG', # 33 !
'CHAR_STR', # 34 "
'CHAR_HASH', # 35 "#"
'CHAR_MONEY', # 36 $
'CHAR_OP1', # 37 %
'CHAR_OP2', # 38 &
'CHAR_STR', # 39 '
'CHAR_LEFTPARENS', # 40 (
'CHAR_RIGHTPARENS', # 41 )
'CHAR_OP2', # 42 *
'CHAR_UNARY', # 43 +
'CHAR_COMMA', # 44 ,
'CHAR_DASH', # 45 -
'CHAR_NUM', # 46 .
'CHAR_SLASH', # 47 /
'CHAR_NUM', # 48 0
'CHAR_NUM', # 49 1
'CHAR_NUM', # 50 2
'CHAR_NUM', # 51 3
'CHAR_NUM', # 52 4
'CHAR_NUM', # 53 5
'CHAR_NUM', # 54 6
'CHAR_NUM', # 55 7
'CHAR_NUM', # 56 8
'CHAR_NUM', # 57 9
'CHAR_OP2', # 58 : colon
'CHAR_SEMICOLON', # 59 ; semiclon
'CHAR_OP2', # 60 <
'CHAR_OP2', # 61 =
'CHAR_OP2', # 62 >
'CHAR_OTHER', # 63 ? BEEP BEEP
'CHAR_VAR', # 64 @
'CHAR_WORD', # 65 A
'CHAR_BSTRING', # 66 B
'CHAR_WORD', # 67 C
'CHAR_WORD', # 68 D
'CHAR_ESTRING', # 69 E
'CHAR_WORD', # 70 F
'CHAR_WORD', # 71 G
'CHAR_WORD', # 72 H
'CHAR_WORD', # 73 I
'CHAR_WORD', # 74 J
'CHAR_WORD', # 75 K
'CHAR_WORD', # 76 L
'CHAR_WORD', # 77 M
'CHAR_NQSTRING', # 78 N
'CHAR_WORD', # 79 O
'CHAR_WORD', # 80 P
'CHAR_QSTRING', # 81 Q
'CHAR_WORD', # 82 R
'CHAR_WORD', # 83 S
'CHAR_WORD', # 84 T
'CHAR_USTRING', # 85 U special pgsql unicode
'CHAR_WORD', # 86 V
'CHAR_WORD', # 87 W
'CHAR_XSTRING', # 88 X
'CHAR_WORD', # 89 Y
'CHAR_WORD', # 90 Z
'CHAR_BWORD', # 91 [ B for Bracket, for Microsoft SQL SERVER
'CHAR_BACK', # 92 \\
'CHAR_OTHER', # 93 ]
'CHAR_OP1', # 94 ^
'CHAR_WORD', # 95 _ underscore
'CHAR_TICK', # 96 ` backtick
'CHAR_WORD', # 97 a
'CHAR_BSTRING', # 98 b
'CHAR_WORD', # 99 c
'CHAR_WORD', # 100 d
'CHAR_ESTRING', # 101 e
'CHAR_WORD', # 102 f
'CHAR_WORD', # 103 g
'CHAR_WORD', # 104 h
'CHAR_WORD', # 105 i
'CHAR_WORD', # 106 j
'CHAR_WORD', # 107 k
'CHAR_WORD', # 108 l
'CHAR_WORD', # 109 m
'CHAR_NQSTRING', # 110 n special oracle code
'CHAR_WORD', # 111 o
'CHAR_WORD', # 112 p
'CHAR_QSTRING', # 113 q special oracle code
'CHAR_WORD', # 114 r
'CHAR_WORD', # 115 s
'CHAR_WORD', # 116 t
'CHAR_USTRING', # 117 u special pgsql unicode
'CHAR_WORD', # 118 v
'CHAR_WORD', # 119 w
'CHAR_XSTRING', # 120 x
'CHAR_WORD', # 121 y
'CHAR_WORD', # 122 z
'CHAR_LEFTBRACE', # 123 { left brace
'CHAR_OP2', # 124 | pipe
'CHAR_RIGHTBRACE', # 125 } right brace
'CHAR_UNARY', # 126 ~
'CHAR_WHITE', # 127
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', # 130
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #140
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #150
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WHITE', #160 0xA0 latin1 whitespace
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #170
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #180
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #190
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #200
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #210
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #220
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #230
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #240
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD', #250
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD',
'CHAR_WORD'
]
import json
def get_fingerprints():
    """
    Load SQLi fingerprints from 'fingerprints.txt', one fingerprint
    per line.  Blank lines (including a trailing newline at EOF) are
    skipped so they cannot produce empty fingerprints.  The result is
    sorted.
    """
    with open('fingerprints.txt', 'r') as lines:
        # strip() both trims whitespace and lets the filter drop blanks
        sqlipat = [line.strip() for line in lines if line.strip()]
    return sorted(sqlipat)
The provided code snippet includes necessary dependencies for implementing the `dump` function. Write a Python function `def dump()` to solve the following problem:
generates a JSON file, sorted keys
Here is the function:
def dump():
    """
    Return one JSON document (sorted keys, 4-space indent) bundling
    the keyword table, the character map and the fingerprint list.
    """
    payload = dict(
        keywords=KEYWORDS,
        charmap=CHARMAP,
        fingerprints=get_fingerprints(),
    )
    return json.dumps(payload, sort_keys=True, indent=4)
18,225 |
The provided code snippet includes necessary dependencies for implementing the `toc` function. Write a Python function `def toc(obj)` to solve the following problem:
main routine
Here is the function:
def toc(obj):
    """
    Print a merged word table for the given object: every keyword with
    its type character, plus every fingerprint (prefixed with '0',
    upper-cased) marked as type 'F'.  Each value is emitted as the
    ordinal of its type character.  Returns 0.

    Note: this function was Python 2 only (print statements,
    dict.iteritems) and could not be imported under Python 3; it is
    ported here with identical output.
    """
    words = {}
    for key, value in obj['keywords'].items():
        words[key] = value
    # Fingerprints are distinguished from keywords by a leading '0',
    # which can never start a keyword.
    for fp in obj['fingerprints']:
        words['0' + fp.upper()] = 'F'
    print('words = {')
    for key in sorted(words):
        print("['{0}']={1},".format(key, ord(words[key])))
    print('}')
    return 0
18,226 |
The provided code snippet includes necessary dependencies for implementing the `make_lua_table` function. Write a Python function `def make_lua_table(obj)` to solve the following problem:
Generates table. Fingerprints don't contain any special chars so they don't need to be escaped. The output may be sorted but it is not required.
Here is the function:
def make_lua_table(obj):
    """
    Print a Lua table literal mapping each fingerprint to true.
    Fingerprints contain no special characters, so no escaping is
    needed; output order follows the input (sorting not required).
    Returns 0.
    """
    rows = ['sqlifingerprints = {']
    for fingerprint in obj['fingerprints']:
        rows.append(' ["{0}"]=true,'.format(fingerprint))
    rows.append('}')
    # One write, same bytes as printing each row separately.
    print('\n'.join(rows))
    return 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.