| Unnamed: 0 (int64, 0 to 15.9k) | cleaned_code (string, 67 to 124k chars, nullable) | cleaned_prompt (string, 168 to 30.3k chars, nullable) |
|---|---|---|
3,700
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os.path
import pdb
import pandas as pd
from pprint import pprint
#DATA_DIR = '/home/brandon/terabyte/Datasets/ubuntu_dialogue_corpus/'
DATA_DIR = '/home/brandon/ubuntu_dialogue_corpus/src/' # sample/'
TRAIN_PATH = DATA_DIR + 'train.csv'
VALID_PATH = DATA_DIR + 'valid.csv'
TEST_PATH = DATA_DIR + 'test.csv'
def get_training():
"""Returns dataframe data from train.csv."""
# First, we need to load the data directly into a dataframe from the train.csv file.
df_train = pd.read_csv(TRAIN_PATH)
# Remove all examples with label = 0. (why would i want to train on false examples?)
df_train = df_train.loc[df_train['Label'] == 1.0]
# Don't care about the pandas indices in the df, so remove them.
df_train = df_train.reset_index(drop=True)
df_train = df_train[df_train.columns[:2]]
return df_train
def get_validation():
"""Returns data from valid.csv."""
# First, we need to load the data directly into a dataframe from the train.csv file.
df_valid = pd.read_csv(VALID_PATH)
first_two_cols = df_valid.columns[:2]
df_valid = df_valid[first_two_cols]
df_valid.columns = ['Context', 'Utterance']
return df_valid
df_train = get_training()
df_valid = get_validation()
# Now get all of the data in a single string and make a 'vocabulary' (unique words).
import nltk, re, pprint
from nltk import word_tokenize
import pdb
def print_single_turn(turn: str):
as_list_of_utters = turn.split('__eou__')[:-1]
for idx_utter, utter in enumerate(as_list_of_utters):
print("\t>>>", utter)
def print_conversation(df, index=0):
"""Display the ith conversation in nice format."""
# Get the row identified by 'index'.
context_entry = df['Context'].values[index]
target = df['Utterance'].values[index]
# Split returns a blank last entry, so don't store.
turns = context_entry.split('__eot__')[:-1]
print('--------------------- CONTEXT ------------------- ')
for idx_turn, turn in enumerate(turns):
print("\nUser {}: ".format(idx_turn % 2))
print_single_turn(turn)
print('\n--------------------- RESPONSE ------------------- ')
print("\nUser {}: ".format(len(turns) % 2))
print_single_turn(target)
def get_user_arrays(df):
"""Returns two arrays of alternating turns: userOne's turns and userTwo's turns.
Specifically:
the returned arrays accumulate one entry per turn across all conversations in df,
so their length is generally larger than the number of rows in df.
Each entry is a string containing the utterances of a single turn.
"""
userOne = []
userTwo = []
contexts = df['Context'].values
targets = df['Utterance'].values
assert(len(contexts) == len(targets))
for i in range(len(contexts)):
# combined SINGLE CONVERSATION ENTRY of multiple turns each with multiple utterances.
list_of_turns = contexts[i].lower().split('__eot__')[:-1] + [targets[i].lower()]
# make sure even number of entries
if len(list_of_turns) % 2 != 0:
list_of_turns = list_of_turns[:-1]
# strip out the __eou__ occurences (leading space bc otherwise would result in two spaces)
new_list_of_turns = []
for turn in list_of_turns:
utter_list = turn.lower().split(" __eou__")
#if len(utter_list) > 3:
# utter_list = utter_list[:3]
new_list_of_turns.append("".join(utter_list))
#list_of_turns = [re.sub(' __eou__', '', t) for t in list_of_turns]
userOneThisConvo = new_list_of_turns[0::2]
userTwoThisConvo = new_list_of_turns[1::2]
userOne += userOneThisConvo
userTwo += userTwoThisConvo
assert(len(userOne) == len(userTwo))
return userOne, userTwo
def save_to_file(fname, arr):
with open(DATA_DIR+fname,"w") as f:
for line in arr:
f.write(line + "\n")
df_train.describe()
pd.options.display.max_colwidth = 500
df_train.head(2)
print_conversation(df_train, 3)
#df_merged = pd.DataFrame(df_train['Context'].map(str) + df_train['Utterance'])
userOne, userTwo = get_user_arrays(df_train)
df_turns = pd.DataFrame({'UserOne': userOne, 'UserTwo': userTwo})
df_turns.head(200)
userOne[0]
def get_sentences(userOne, userTwo):
encoder = []
decoder = []
assert(len(userOne) == len(userTwo))
for i in range(len(userOne)):
one = nltk.sent_tokenize(userOne[i])
one = [s for s in one if s != '.']
two = nltk.sent_tokenize(userTwo[i])
two = [s for s in two if s != '.']
combine = one + two
assert(len(combine) == len(one) + len(two))
if len(combine) % 2 != 0:
combine = combine[:-1]
enc = combine[0::2]
dec = combine[1::2]
assert(len(enc) == len(dec))
encoder.append(enc)
decoder.append(dec)
return encoder, decoder
encoder, decoder = get_sentences(userOne, userTwo)
print('done')
encoder = [nltk.word_tokenize(s[0]) for s in encoder]
decoder = [nltk.word_tokenize(s[0]) for s in decoder]
max_enc_len = max([len(s) for s in encoder])
max_dec_len = max([len(s) for s in decoder])
print(max_enc_len)
print(max_dec_len)
encoder_lengths = [len(s) for s in encoder]
decoder_lengths = [len(s) for s in decoder]
df_lengths = pd.DataFrame({'EncoderSentLength': encoder_lengths, 'DecoderSentLengths': decoder_lengths})
df_lengths.describe()
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = 9, 5
fig, axes = plt.subplots(nrows=1, ncols=2)
plt.subplot(1, 2, 1)
plt.hist(encoder_lengths)
plt.subplot(1, 2, 2)
plt.hist(decoder_lengths, color='b')
plt.tight_layout()
plt.show()
save_to_file("train_from.txt", userOne)
save_to_file("train_to.txt", userTwo)
print("df_valid has", len(df_valid), "rows.")
df_valid.head()
userOne, userTwo = get_user_arrays(df_valid)
save_to_file("valid_from.txt", userOne)
save_to_file("valid_to.txt", userTwo)
print('done')
import matplotlib.pyplot as plt
%matplotlib inline
userOne, userTwo = get_user_arrays(df_train)
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
lengths = np.array([len(t.strip().split()) for t in userOne])
max_ind = lengths.argmax()
print(max(lengths), "at", max_ind)
print("Sentence:\n", userOne[max_ind])
import matplotlib.pyplot as plt
plt.hist(sorted(lengths)[:-20])
n_under_100 = sum([1 if l < 100 else 0 for l in lengths])
print(n_under_100, "out of", len(lengths), "({:.1%})".format(float(n_under_100)/len(lengths)))
df_lengths = pd.DataFrame(lengths)
df_lengths.describe()
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
# Size of the model's output vocabulary.
vocab_size = 40000
# Uniform chance of guessing any word.
loss_random_guess = np.log(float(vocab_size))
print("Loss for uniformly random guessing is", loss_random_guess)
sent_length = [5, 10, 25]
# Outputs correct target x percent of the time.
pred_accuracy = np.arange(1, 100)  # start at 1 to avoid division by zero below
plt.plot(pred_accuracy, [1./p for p in pred_accuracy])
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = 10, 8
def _sample(logits, t):
res = logits / t
res = np.exp(res) / np.sum(np.exp(res))
return res
N = 100
x = np.arange(N)
before = np.array([1.0+i**2 for i in range(N)])
before /= before.sum()
plt.plot(x, before, 'b--', label='before')
after = _sample(before, 0.1)
plt.plot(x, after, 'g--', label='temp=0.1')
after = _sample(before, 0.2)
print(after.argmax())
plt.plot(x, after, 'r--', label='temp=0.2')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
np.info(plt.plot)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Reformatting Ubuntu Dialogue Corpus for Chatbot Model
Step5: Functions for Visualization and Reformatting
Step6: Training Data
Step7: Turn-Based DataFrame
Step8: Sentence-Based DataFrame
Step9: Analyzing Sentence Lengths
Step10: Validation Data
Step11: Visualization
Step12: Relationship between Accuracy, Loss, and Others
|
3,701
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
import speclite
from desisim.io import read_basis_templates
import matplotlib
import seaborn as sns
%matplotlib inline
sns.set(style='white', font_scale=1.8, font='sans-serif')
colors = sns.color_palette('Set2', n_colors=8, desat=0.75)
#colors = iter([_colors[1], _colors[2], _colors[0], _colors[3], _colors[4]])
seed = 1
rand = np.random.RandomState(seed)
fnew, wnew, mnew = read_basis_templates('STAR')
fold, wold, mold = read_basis_templates('STAR', infile=os.path.join(
os.getenv('DESI_ROOT'), 'spectro', 'templates',
'basis_templates', 'v2.6', 'star_templates_v2.2.fits'))
len(mnew), wnew.min(), wnew.max(), len(mold), wold.min(), wold.max()
def qa_physical(new=True):
if new:
meta = mnew
title = 'v3.0 Templates'
col = colors[0]
else:
meta = mold
title = 'v2.2 Templates'
col = colors[1]
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 4))
ax1.scatter(meta['TEFF'], meta['LOGG'], color=col)
ax1.set_xscale('log')
ax1.set_xticks([3000, 5000, 1E4, 2E4])
ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax1.set_xlabel('$T_{eff}$ (K)')
ax1.set_ylabel('$\log g$ (cm s$^{-2}$)')
#ax1.legend(loc='lower right', fontsize=10, markerscale=1.0)
ax2.scatter(meta['TEFF'], meta['FEH'], color=col)
ax2.set_xscale('log')
ax2.set_xticks([3000, 5000, 1E4, 2E4])
ax2.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax2.set_xlabel('$T_{eff}$ (K)')
ax2.set_ylabel('[Fe/H]')
ax3.scatter(meta['LOGG'], meta['FEH'], color=col)
ax3.set_ylabel('[Fe/H]')
ax3.set_xlabel('$\log g$ (cm s$^{-2}$)')
fig.suptitle(title, fontsize=18)
fig.subplots_adjust(wspace=0.22)
qa_physical(new=False)
qa_physical(new=True)
class star_KDTree(object):
def __init__(self, meta):
logteff = np.log10(meta['TEFF'].data)
logg = meta['LOGG']
feh = meta['FEH']
self.param_min = ( logteff.min(), logg.min(), feh.min() )
self.param_range = ( np.ptp(logteff), np.ptp(logg), np.ptp(feh) )
self.KDTree = self.KDTree_build( np.vstack((logteff, logg, feh)).T )
def KDTree_rescale(self, matrix):
"""Normalize input parameters to [0, 1]."""
nobj, ndim = matrix.shape
return ( (matrix - np.tile(self.param_min, nobj).reshape(nobj, ndim)) /
np.tile( self.param_range, nobj).reshape(nobj, ndim) )
def KDTree_build(self, matrix):
"""Build a KD-tree."""
from scipy.spatial import cKDTree as KDTree
return KDTree( self.KDTree_rescale(matrix) )
def KDTree_query(self, matrix, return_dist=False):
"""Return the nearest template number based on the KD Tree."""
dist, indx = self.KDTree.query( self.KDTree_rescale(matrix) )
if return_dist:
return dist, indx
else:
return indx
def plot_subset(nplot=32, ncol=4, these=None, xlim=(3000, 10000),
loc='right', targname='', objtype='', seed=None):
"""Plot a random sampling of new and old stellar spectra."""
rand = np.random.RandomState(seed)
nrow = np.ceil(nplot / ncol).astype('int')
# Choose a random subset of the old templates and then find
# the nearest (in physical parameter space) new template.
if these is None:
tree = star_KDTree(mnew)
indxold = rand.choice(len(mold), nplot, replace=False)
#indxold = indxold[np.argsort(mold['TEFF'][indxold])]
indxnew = tree.KDTree_query( np.vstack((
np.log10(mold['TEFF'][indxold]),
mold['LOGG'][indxold], mold['FEH'][indxold])).T )
#print(mold['TEFF'][indxold].data / mnew['TEFF'][indxnew].data)
#print(indxold, indxnew)
w1 = (wold > 5500) * (wold < 5550)
w2 = (wnew > 5500) * (wnew < 5550)
fig, ax = plt.subplots(nrow, ncol, figsize=(4*ncol, 3*nrow),
sharey=False, sharex=True)
for thisax, iold, inew in zip(ax.flat, indxold, indxnew):
thisax.plot(wnew, fnew[inew, :] / np.median(fnew[inew, w2]))
thisax.plot(wold, fold[iold, :] / np.median(fold[iold, w1]))
lbl0 = 'ID: {}, {}'.format(iold, inew)
lbl1 = r'Teff: ${:.2f}, {:.2f}$'.format(mold['TEFF'][iold], mnew['TEFF'][inew])
lbl2 = r'logg: ${:.2f}, {:.2f}$'.format(mold['LOGG'][iold], mnew['LOGG'][inew])
lbl3 = r'[Fe/H]: ${:.2f}, {:.2f}$'.format(mold['FEH'][iold], mnew['FEH'][inew])
#label = 'Old={}, New={}'.format(iold, inew)
label = lbl0+'\n'+lbl1+'\n'+lbl2+'\n'+lbl3
#label = 'Old={}, New={}'.format(iold, inew)
if mold['TEFF'][iold] >= 4500:
xtxt, ytxt, ha = 0.93, 0.93, 'right'
else:
xtxt, ytxt, ha = 0.05, 0.93, 'left'
thisax.text(xtxt, ytxt, label, ha=ha, va='top',
transform=thisax.transAxes, fontsize=13)
thisax.xaxis.set_major_locator(plt.MaxNLocator(3))
#thisax.set_xscale('log')
#thisax.set_yscale('log')
if xlim:
thisax.set_xlim(xlim)
for thisax in ax.flat:
thisax.yaxis.set_ticks([])
thisax.margins(0.2)
fig.suptitle(targname)
fig.subplots_adjust(wspace=0.05, hspace=0.05, top=0.93)
#indxold = 731
#mold[indxold]
#tree = star_KDTree(mnew)
#indxnew = tree.KDTree_query( np.vstack((np.log10(mold['TEFF'][indxold]),
# mold['LOGG'][indxold], mold['FEH'][indxold])).T )
#mnew[indxnew]
#mnew[(mnew['TEFF'] == 5500) * (mnew['FEH'] == -2.5) * (mnew['LOGG'] == 0)]
plot_subset()
def star_colors(new=True):
"""Read the stellar templates, synthesize photometry, and return colors."""
if new:
bands = ('g', 'r', 'z')
filts = ('decam2014-g', 'decam2014-r', 'decam2014-z')
filt = speclite.filters.load_filters(*filts)
flux, wave, meta = fnew, wnew, mnew
else:
bands = ('g', 'r', 'z', 'W1', 'W2')
filts = ('decam2014-g', 'decam2014-r', 'decam2014-z', 'wise2010-W1', 'wise2010-W2')
filt = speclite.filters.load_filters(*filts)
flux, wave, meta = fold, wold, mold
nt = len(meta)
print('Synthesizing photometry for {} templates.'.format(nt))
phot = filt.get_ab_maggies(flux, wave, mask_invalid=False)
for ff, bb in zip( phot.colnames, bands ):
phot.rename_column(ff, bb)
#synthflux = np.vstack( [phot[ff].data for ff in filts] )
colors = dict(
r = 22.5 - 2.5 * np.log10(phot['r']),
gr = -2.5 * np.log10(phot['g'] / phot['r']),
rz = -2.5 * np.log10(phot['r'] / phot['z']),
gz = -2.5 * np.log10(phot['g'] / phot['z']))
if 'W1' in bands:
colors.update( {
'rW1': -2.5 * np.log10(phot['r'] / phot['W1']),
'zW1': -2.5 * np.log10(phot['z'] / phot['W1']) } )
return colors
newcol = star_colors()
oldcol = star_colors(new=False)
grrange = (-0.6, 2.2)
gzrange = (0.0, 4.0)
rzrange = (-0.6, 2.8)
zW1range = (-2.5, 0.0)
def qa_colorcolor(pickles=False, pngfile=None):
fig, ax = plt.subplots(figsize=(8, 6))
if pickles:
ax.scatter(picklecol['rz'], picklecol['gr'], marker='s',
s=20, linewidth=1, alpha=0.5, label='Pickles+98')#, c='r')
ax.scatter(newcol['rz'], newcol['gr'], marker='s', color=colors[1],
s=3, linewidth=1, alpha=1.0, label='v3.0 Templates')#, c='b')
ax.scatter(oldcol['rz'], oldcol['gr'], marker='o', color=colors[0],
s=15, linewidth=1, alpha=0.8, label='v2.2 Templates')#, c='b')
ax.set_xlabel('r - z')
ax.set_ylabel('g - r')
ax.set_xlim(rzrange)
ax.set_ylim(grrange)
lgnd = ax.legend(loc='upper left', frameon=False, fontsize=18)
lgnd.legendHandles[0]._sizes = [100]
lgnd.legendHandles[1]._sizes = [100]
if pickles:
lgnd.legendHandles[2]._sizes = [100]
if pngfile:
fig.savefig(pngfile)
qa_colorcolor(pickles=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the old (v2.2) and new (v3.0) templates here for convenience.
Step2: Compare the coverage of physical parameter space.
Step7: Directly compare the v2.2 and v3.0 templates
Step9: Color-color plots
|
3,702
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy import optimize
# training data
x_train = np.array(([3,5],[5,1],[10,2],[6,1.5]), dtype=float)
y_train = np.array(([75],[82],[93],[70]), dtype=float)
# testing data
x_test = np.array(([4, 5.5],[4.5, 1],[9,2.5],[6,2]), dtype=float)
y_test = np.array(([70],[89],[85],[75]), dtype=float)
def scale_data(hours, test_score):
MAX_SCORE = 100
hours = hours / np.amax(hours, axis=0)
test_score /= MAX_SCORE
return hours, test_score
# normalize data
x_train, y_train = scale_data(x_train, y_train)
x_test, y_test = scale_data(x_test, y_test)
x_train
y_train
class Neural_Network(object):
def __init__(self):
# define hyperparameters
self.input_layer_size = 2
self.hidden_layer_size = 3
self.output_layer_size = 1
#define parameters
self.W1 = np.random.randn(self.input_layer_size, self.hidden_layer_size)
self.W2 = np.random.randn(self.hidden_layer_size, self.output_layer_size)
# forward propagation
def forward(self, X):
self.z2 = np.dot(X, self.W1)
self.a2 = self.sigmoid(self.z2)
self.z3 = np.dot(self.a2, self.W2)
prediction = self.sigmoid(self.z3)
return prediction
# activation functions
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
NN = Neural_Network()
NN.forward(x_train)
y_train
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <p>The importance of separating testing data from training data is to make sure the model holds up against the real world, by comparing its predictions to held-out test values. The test data doesn't go through the same pipelines as the training data does. The input data values are passed through the input neurons as matrices for <strong>computational speed-ups</strong>.</p>
Step2: <p>The next phase is for the input to be multiplied, using the <strong>dot product</strong>, by the first set of weights on the first layer of synapses. With a total of 6 weights, where three are connected to each input neuron, a 2x3 matrix is formed
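A small illustrative sketch of the shapes involved (this snippet is not part of the original notebook; the variable names are just for illustration):
import numpy as np
X = np.random.rand(4, 2)        # 4 examples, 2 input features (hours sleep, hours study)
W1 = np.random.randn(2, 3)      # one weight from each of the 2 inputs to each of the 3 hidden neurons
Z2 = np.dot(X, W1)              # (4, 2) . (2, 3) -> (4, 3): one row of hidden-layer activations per example
print(Z2.shape)                 # (4, 3)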
|
3,703
|
<ASSISTANT_TASK:>
Python Code:
import rebound
sim = rebound.Simulation()
sim.add(m=1., x=1., vz = 2.)
sim.add(m=1., a=1.)
sim.status()
sim.add(m=1.e-3, a=100.)
sim.add(primary=sim.particles[1], a=0.01)
print(sim.particles[1].a)
orbits = sim.calculate_orbits()
for orbit in orbits:
print(orbit)
print(sim.particles[3].calculate_orbit(primary=sim.particles[1]))
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0., inc=0.1, Omega=0.3, omega=0.1)
print(sim.particles[1].orbit)
print(sim.particles[1].theta)
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0., inc=0.1, Omega=0.3, theta = 0.4)
print(sim.particles[1].theta)
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0.2, Omega=0.1)
print(sim.particles[1].orbit)
print(sim.particles[1].pomega)
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0.2, pomega=0.1)
print(sim.particles[1].orbit)
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0.1, Omega=0.3, M = 0.1)
sim.add(a=1., Omega=0.3, l = 0.4)
print(sim.particles[1].l)
print(sim.particles[2].l)
sim.particles[2].theta
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1., e=0.1, omega=1.)
print(sim.particles[1].orbit)
import random
import numpy as np
def simulation(par):
e,f = par
e = 10**e
f = 10**f
sim = rebound.Simulation()
sim.add(m=1.)
a = 1.
inc = random.random()*np.pi
Omega = random.random()*2*np.pi
sim.add(m=0.,a=a,e=e,inc=inc,Omega=Omega, f=f)
o=sim.particles[1].orbit
if o.f < 0: # avoid wrapping issues
o.f += 2*np.pi
err = max(np.fabs(o.e-e)/e, np.fabs(o.f-f)/f)
return err
random.seed(1)
N = 100
es = np.linspace(-16.,-1.,N)
fs = np.linspace(-16.,-1.,N)
params = [(e,f) for e in es for f in fs]
pool=rebound.InterruptiblePool()
res = pool.map(simulation, params)
res = np.array(res).reshape(N,N)
res = np.nan_to_num(res)
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.colors import LogNorm
import matplotlib
f,ax = plt.subplots(1,1,figsize=(7,5))
extent=[fs.min(), fs.max(), es.min(), es.max()]
ax.set_xlim(extent[0], extent[1])
ax.set_ylim(extent[2], extent[3])
ax.set_xlabel(r"true anomaly (f)")
ax.set_ylabel(r"eccentricity")
im = ax.imshow(res, norm=LogNorm(), vmax=1., vmin=1.e-16, aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
cb = plt.colorbar(im, ax=ax)
cb.solids.set_rasterized(True)
cb.set_label("Relative Error")
def simulation(par):
e,theta = par
e = 10**e
theta = 10**theta
sim = rebound.Simulation()
sim.add(m=1.)
a = 1.
inc = random.random()*np.pi
Omega = random.random()*2*np.pi
omega = random.random()*2*np.pi
sim.add(m=0.,a=a,e=e,inc=inc,Omega=Omega, theta=theta)
o=sim.particles[1].orbit
if o.theta < 0:
o.theta += 2*np.pi
err = max(np.fabs(o.e-e)/e, np.fabs(o.theta-theta)/theta)
return err
random.seed(1)
N = 100
es = np.linspace(-16.,-1.,N)
thetas = np.linspace(-16.,-1.,N)
params = [(e,theta) for e in es for theta in thetas]
pool=rebound.InterruptiblePool()
res = pool.map(simulation, params)
res = np.array(res).reshape(N,N)
res = np.nan_to_num(res)
f,ax = plt.subplots(1,1,figsize=(7,5))
extent=[thetas.min(), thetas.max(), es.min(), es.max()]
ax.set_xlim(extent[0], extent[1])
ax.set_ylim(extent[2], extent[3])
ax.set_xlabel(r"true longitude ($\theta$)")
ax.set_ylabel(r"eccentricity")
im = ax.imshow(res, norm=LogNorm(), vmax=1., vmin=1.e-16, aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
cb = plt.colorbar(im, ax=ax)
cb.solids.set_rasterized(True)
cb.set_label("Relative Error")
sim.add(a=-0.2, e=1.4)
sim.status()
sim = rebound.Simulation()
sim.add(m=1.)
q = 0.1
a=-1.e14
e=1.+q/np.fabs(a)
sim.add(a=a, e=e)
print(sim.particles[1].orbit)
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(a=1.,inc=np.pi,e=0.1, Omega=0., pomega=1.)
print(sim.particles[1].orbit)
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(m=1.e-3, a=1., jacobi_masses=True)
sim.add(m=1.e-3, a=5., jacobi_masses=True)
sim.move_to_com()
print(sim.particles[1].a, sim.particles[2].a)
o = sim.calculate_orbits(jacobi_masses=True)
print(o[0].a, o[1].a)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Any components not passed automatically default to 0. REBOUND can also accept orbital elements.
Step2: We always have to pass a semimajor axis (to set a length scale), but any other elements are by default set to 0. Notice that our second star has the same vz as the first one due to the default Jacobi elements. Now we could add a distant planet on a circular orbit,
Step3: This planet is set up relative to the binary center of mass (again due to the Jacobi coordinates), which is probably what we want. But imagine we now want to place a test mass in a tight orbit around the second star. If we passed things as above, the orbital elements would be referenced to the binary/outer-planet center of mass. We can override the default by explicitly passing a primary (any instance of the Particle class)
Step4: All simulations are performed in Cartesian elements, so to avoid the overhead, REBOUND does not update particles' orbital elements as the simulation progresses. However, you can always access any orbital element through, e.g., sim.particles[1].inc (see the diagram, and table of orbital elements under the Orbit structure at http
Step5: Notice that there is always one less orbit than there are particles, since orbits are only defined between pairs of particles. We see that we got the first two orbits right, but the last one is way off. The reason is that again the REBOUND default is that we always get Jacobi elements. But we initialized the last particle relative to the second star, rather than the center of mass of all the previous particles.
Step6: though we could have simply avoided this problem by adding bodies from the inside out (second star, test mass, first star, circumbinary planet).
Step7: The problem here is that $\omega$ (the angle from the ascending node to pericenter) is ill-defined for a circular orbit, so it's not clear what we mean when we pass it, and we get spurious results for both $\omega$ and $f$, since the latter is also undefined as the angle from pericenter to the particle's position. However, the true longitude $\theta$, the broken angle from the $x$ axis to the ascending node = $\Omega + \omega + f$, and then to the particle's position, is always well defined
Step8: To be clearer and ensure we get the results we expect, we could instead pass theta to specify the longitude we want, e.g.
Step9: Here we have a planar orbit, in which case the line of nodes becomes ill defined, so $\Omega$ is not a good variable, but we pass it anyway! In this case, $\omega$ is also undefined since it is referenced to the ascending node. Here we get that now these two ill-defined variables get flipped. The appropriate variable is pomega ($\varpi = \Omega + \omega$), which is the angle from the $x$ axis to pericenter
Step10: We can specify the pericenter of the orbit with either $\omega$ or $\varpi$
Step11: Note that if the inclination is exactly zero, REBOUND sets $\Omega$ (which is undefined) to 0, so $\omega = \varpi$.
Step12: REBOUND calculates the mean longitude in such a way that it smoothly approaches $\theta$ in the limit of $e\rightarrow0$
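For reference (a standard definition, not stated explicitly in the original): the mean longitude is $\lambda = \Omega + \omega + M$, the direct analogue of the true longitude $\theta = \Omega + \omega + f$ with the mean anomaly in place of the true anomaly; since $M - f \rightarrow 0$ as $e \rightarrow 0$, $\lambda$ smoothly approaches $\theta$ in that limit.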
Step13: In summary, you can specify the phase of the orbit through any one of the angles M, f, theta or l=$\lambda$. Additionally, one can instead use the time of pericenter passage T. This time should be set in the appropriate time units, and you'd initialize sim.t to the appropriate time you want to start the simulation.
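A minimal sketch of that last option, assuming the T keyword for the time of pericenter passage as described above (this example is not part of the original notebook):
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
# Phase specified via the time of pericenter passage instead of M, f, theta or l.
sim.add(a=1., e=0.1, T=0.5)
# Start the simulation at the epoch we actually care about.
sim.t = 0.
print(sim.particles[1].orbit)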
Step14: We see that the behavior is poor, which is physically due to $f$ becoming poorly defined at low $e$. If instead we initialize the orbits with the true longitude $\theta$ as discussed above, we get much better results
Step15: Hyperbolic & Parabolic Orbits
Step16: Currently there is no support for exactly parabolic orbits, but we can get a close approximation by passing a nearby hyperbolic orbit where we can specify the pericenter = $|a|(e-1)$ with $a$ and $e$. For example, for a 0.1 AU pericenter,
Step17: Retrograde Orbits
Step18: We passed $\Omega=0$ and $\varpi=1.$. For prograde orbits, $\varpi = \Omega + \omega$, so we'd expect $\omega = 1$, but instead we get $\omega=-1$. If we think about things physically, $\varpi$ is the angle from the $x$ axis to pericenter, measured in the positive direction (counterclockwise) defined by $z$. $\Omega$ is always measured in this same sense, but $\omega$ is always measured in the orbital plane in the direction of the orbit. For retrograde orbits, this means that $\omega$ is measured in the opposite sense to $\Omega$, so $\varpi = \Omega - \omega$, which is why we got $\omega = -1$.
Step19: The jacobi mass and default mass assigned by REBOUND always agree for the first particle, but differ for all the rest
Step20: We can calculate orbital elements using jacobi masses by using the same flag in sim.calculate_orbits
|
3,704
|
<ASSISTANT_TASK:>
Python Code:
# point to our previously-saved data
reference_catalog = '../data/PTF_Refims_Files/PTF_d022683_f02_c06_u000114210_p12_sexcat.ctlg'
outfile = reference_catalog.split('/')[-1].replace('ctlg','shlv')
ra_fav, dec_fav = (312.503802, -0.706603)
mjds, mags, magerrs = source_lightcurve('../data/'+outfile, ra_fav, dec_fav)
import astropy.constants as const
(const.au / const.c).to(u.minute)
bjds = barycenter_times(mjds,ra_fav,dec_fav)
# define plot function
def plot_data( # COMPLETE THIS LINE
plt.errorbar( # COMPLETE THIS LINE
fmt = '_', capsize=0)
plt.xlabel('Date (MJD)')
plt.ylabel('Magnitude')
plt.gca().invert_yaxis()
# run plot function
plot_data(bjds, mags, magerrs)
# documentation for the astroML lomb_scargle function
help(lomb_scargle)
freq_min = # COMPLETE
print('The minimum frequency our data is sensitive to is {:.3f} radian/days, corresponding to a period of {:.3f} days'.format(freq_min, 2*np.pi/freq_min))
freq_max = # COMPLETE
print('The maximum frequency our data is sensitive to is APPROXIMATELY {:.3f} radian/days, corresponding to a period of {:.3f} days'.format(freq_max, 2*np.pi/freq_max))
n_bins = # COMPLETE
print(n_bins)
# define frequency function
def frequency_grid(times):
freq_min = # COMPLETE
freq_max = # COMPLETE
n_bins = # COMPLETE
print('Using {} bins'.format(n_bins))
return np.linspace(freq_min, freq_max, n_bins)
# run frequency function
omegas = frequency_grid(bjds)
# provided alternate frequency function
def alt_frequency_grid(Pmin, Pmax, n_bins = 5000):
"""Generate an angular frequency grid between Pmin and Pmax (assumed to be in days)."""
freq_max = 2*np.pi / Pmin
freq_min = 2*np.pi / Pmax
return np.linspace(freq_min, freq_max, n_bins)
# calculate and plot LS periodogram
P_LS = lomb_scargle( # COMPLETE
plt.plot(omegas, P_LS)
plt.xlabel('$\omega$')
plt.ylabel('$P_{LS}$')
# provided: define function to find best period
def LS_peak_to_period(omegas, P_LS):
"""Find the highest peak in the LS periodogram and return the corresponding period."""
max_freq = omegas[np.argmax(P_LS)]
return 2*np.pi/max_freq
# run function to find best period
best_period = LS_peak_to_period(omegas, P_LS)
print("Best period: {} days".format(best_period))
# define function to phase lightcurves
def phase(time, period, t0 = None):
"""Given an input array of times and a period, return the corresponding phase."""
if t0 is None:
t0 = time[0]
return # COMPLETE
# define function to plot phased lc
def plot_phased_lc(mjds, mags, magerrs, period, t0=None):
phases = # COMPLETE
plt.errorbar( #COMPLETE
fmt = '_', capsize=0)
plt.xlabel('Phase')
plt.ylabel('Magnitude')
plt.gca().invert_yaxis()
# run function to plot phased lc
plot_phased_lc(bjds, mags, magerrs, best_period)
omegas = alt_frequency_grid( # COMPLETE
P_LS = lomb_scargle( # COMPLETE
plt.plot(omegas, P_LS)
plt.xlabel('$\omega$')
plt.ylabel('$P_{LS}$')
best_period = # COMPLETE
print("Best period: {} days".format(best_period))
plot_phased_lc(bjds, mags, magerrs, best_period)
D = lomb_scargle_bootstrap( # COMPLETE
sig99, sig95 = np.percentile( # COMPLETE
plt.plot(omegas, P_LS)
plt.plot([omegas[0],omegas[-1]], sig99*np.ones(2),'--')
plt.plot([omegas[0],omegas[-1]], sig95*np.ones(2),'--')
plt.xlabel('$\omega$')
plt.ylabel('$P_{LS}$')
import gatspy
ls = gatspy.periodic.LombScargleFast()
ls.optimizer.period_range = ( # COMPLETE
# we have to subtract the t0 time so the model plotting has the correct phase origin
ls.fit(bjds-bjds[0],mags,magerrs)
gatspy_period = ls. # COMPLETE
print(gatspy_period)
plot_phased_lc(bjds, mags, magerrs, gatspy_period)
p = np.linspace(0,gatspy_period,100)
plt.plot(p/gatspy_period,ls.predict(p,period=gatspy_period))
ss = gatspy.periodic.SuperSmoother(fit_period=True)
ss.optimizer.period_range = ( #COMPLETE
ss.fit( # COMPLETE
gatspy_period = ss. # COMPLETE
print(gatspy_period)
plot_phased_lc(bjds, mags, magerrs, gatspy_period)
p = np.linspace(0,gatspy_period,100)
plt.plot(p/gatspy_period,ss.predict(p,period=gatspy_period))
from astroML.time_series import multiterm_periodogram
omegas = alt_frequency_grid(.2,1.2)
P_mt = multiterm_periodogram( #COMPLETE
plt.plot(omegas, P_mt)
plt.xlabel('$\omega$')
plt.ylabel('$P_{mt}$')
best_period = # COMPLETE
print("Best period: {} days".format(best_period))
plot_phased_lc(bjds, mags, magerrs, best_period)
ls = gatspy.periodic.LombScargle(Nterms=4)
ls.optimizer.period_range = ( # COMPLETE
ls.fit( # COMPLETE
gatspy_period = ls. # COMPLETE
print(gatspy_period)
plot_phased_lc(bjds, mags, magerrs, gatspy_period)
p = np.linspace(0,gatspy_period,100)
plt.plot(p/gatspy_period,ls.predict(p,period=gatspy_period))
# open the stored data
import shelve
import astropy
shelf = shelve.open('../data/'+outfile)
all_mags = shelf['mags']
all_mjds = shelf['mjds']
all_errs = shelf['magerrs']
all_coords = shelf['ref_coords']
shelf.close()
# loop over stars
variable_inds = []
best_periods = []
best_power = []
with astropy.utils.console.ProgressBar(all_mags.shape[0],ipython_widget=True) as bar:
for i in range(all_mags.shape[0]):
# make sure there's real data
wgood = (all_mags[i,:].mask == False)
n_obs = np.sum(wgood)
# if we don't have many observations, don't bother computing periods
if n_obs < 40:
continue # the "continue" instruction tells python to skip the rest of the loop for this element and continue with the next one
# COMPLETE: make a cut so you only compute periods for variable sources
if # source is not variable:
continue
variable_inds.append(i)
bjds = barycenter_times(all_mjds[wgood],all_coords[i].ra.degree,all_coords[i].dec.degree)
# COMPLETE: calculate best period
best_periods.append( # COMPLETE
best_power.append( # COMPLETE: add the LS power here
bar.update()
# COMPLETE: now find the most promising periods and plot them!
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll start by loading the data from our favorite star, which has coordinates $\alpha_\mathrm{J2000}, \delta_\mathrm{J2000} = (312.503802, -0.706603)$.
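The helper source_lightcurve used here is supplied with the workshop materials and is not defined in this notebook; the sketch below only illustrates what it plausibly does, given the shelf layout ('mjds', 'mags', 'magerrs', 'ref_coords') used later in the notebook, and the real implementation may differ:
import shelve
import astropy.units as u
from astropy.coordinates import SkyCoord

def source_lightcurve(shelf_file, ra, dec):
    # Return (mjds, mags, magerrs) for the catalog source closest to (ra, dec).
    with shelve.open(shelf_file) as shelf:
        coords = shelf['ref_coords']
        all_mjds, all_mags, all_errs = shelf['mjds'], shelf['mags'], shelf['magerrs']
    target = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)
    idx = target.separation(coords).argmin()      # nearest source in the reference catalog
    good = (all_mags[idx, :].mask == False)       # keep only epochs with real measurements
    return all_mjds[good], all_mags[idx, good], all_errs[idx, good]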
Step2: Barycentering
Step3: We have provided a script to barycenter the data--note that it assumes that the data come from the P48. Use the bjds (barycentered modified julian date) variable through the remainder of this notebook.
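Likewise, barycenter_times itself comes with the materials; a minimal sketch of what such a helper might look like using astropy (assuming P48/Palomar as the observing site; the provided script may differ in detail):
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation
import astropy.units as u

def barycenter_times(mjds, ra, dec):
    # Convert observed MJDs to barycentric MJDs for a target at (ra, dec).
    palomar = EarthLocation.of_site('Palomar')
    target = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)
    times = Time(mjds, format='mjd', scale='utc', location=palomar)
    ltt = times.light_travel_time(target)   # light travel time to the solar-system barycenter
    return (times.tdb + ltt).mjd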
Step4: Optional exercise
Step5: The Lomb Scargle Periodogram
Step6: Exercise 2
Step7: Exercise 2.2
Step8: Exercise 2.3
Step9: Exercise 2.4
Step11: In some cases you'll want to generate the frequency grid by hand, either to extend to higher frequencies (shorter periods) than found by default, to avoid generating too many bins, or to get a more precise estimate of the period. In that case use the following code. We'll use a large fixed number of bins to smoothly sample the periodogram as we zoom in.
Step13: Exercise 3
Step15: Exercise 4
Step16: Exercise 5
Step17: How does that look? Do you think you are close to the right period?
Step18: Exercise 6
Step19: Exercise 7
Step20: [Challenge] Exercise 9
Step21: [Challenge] Exercise 10
Step22: [Challenge] Exercise 11
|
3,705
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
"""Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
minval = 0
maxval = 255
a = 0.1
b = 0.9
return a + ((image_data-minval)*(b-a))/(maxval-minval)
# TODO: Implement Min-Max scaling for grayscale image data
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
#%matplotlib inline
%matplotlib notebook
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
#features = tf.truncated_normal()
# labels =
# TODO: Set the weights and biases tensors
# weights =
# biases =
# Features and Labels
features = tf.placeholder(tf.float32, [None, features_count])
labels = tf.placeholder(tf.float32, [None, labels_count])
# Weights & bias
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
biases = tf.Variable(tf.zeros([labels_count]))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
# epochs =
# learning_rate =
epochs = 10
learning_rate = 0.01
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: [Image: Mean_Variance_Image.png]
Step6: Checkpoint
Step7: Problem 2
Step8: [Image: Learn_Rate_Tune_Image.png]
Step9: Test
|
3,706
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import openpnm as op
%config InlineBackend.figure_formats = ['svg']
import openpnm.models.physics as pmods
import matplotlib.pyplot as plt
pn = op.network.Cubic(shape=[15,15,15], spacing=6e-5)
geom = op.geometry.SpheresAndCylinders(network=pn, pores=pn['pore.all'],
throats=pn['throat.all'])
air = op.phases.Air(network=pn,name='air')
water = op.phases.Water(network=pn,name='water')
air.add_model(propname='throat.hydraulic_conductance',
model=pmods.hydraulic_conductance.hagen_poiseuille)
air.add_model(propname='throat.entry_pressure',
model=pmods.capillary_pressure.washburn)
water.add_model(propname='throat.hydraulic_conductance',
model=pmods.hydraulic_conductance.hagen_poiseuille)
water.add_model(propname='throat.entry_pressure',
model=pmods.capillary_pressure.washburn)
ip = op.algorithms.InvasionPercolation(network=pn, phase=air)
Finlets= pn.pores('top')
ip.set_inlets(pores=Finlets)
ip.run()
air.update(ip.results())
rp = op.algorithms.metrics.RelativePermeability(network=pn)
rp.settings.update({'nwp': 'air',
'invasion_sequence': 'invasion_sequence'})
rp.run(Snwp_num=10)
results=rp.get_Kr_data()
pd.DataFrame(results['kr_nwp'])
fig = rp.plot_Kr_curves()
rp = op.algorithms.metrics.RelativePermeability(network=pn)
rp.settings.update({'nwp': 'air',
'wp': 'water',
'invasion_sequence': 'invasion_sequence'})
rp.run(Snwp_num=10)
fig = rp.plot_Kr_curves()
rp = op.algorithms.metrics.RelativePermeability(network=pn)
inlets = {'x': 'top'}
outlets = {'x': 'bottom'}
rp.settings.update({'nwp': 'air',
'wp': 'water',
'invasion_sequence': 'invasion_sequence'
})
rp.settings['flow_inlets'].update(inlets)
rp.settings['flow_outlets'].update(outlets)
rp.run(Snwp_num=10)
fig = rp.plot_Kr_curves()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The only other argument that needs to be passed to the relative permeability metric is the invasion sequence (we made it a user-defined sequence so that the user has the option to implement the drainage process in any direction using any algorithm). The invasion sequence can be obtained by running Invasion Percolation on the network. Assuming a drainage process, the air (the invading/non-wetting phase) will invade the medium.
Step2: Having the network and the invasion sequence, we can now use the relative permeability metric. These are the minimum required arguments for the algorithm to run. If we do not pass the defending phase to the algorithm, it does not report anything related to the defending phase's relative permeability. If we do not define the flow direction, it will automatically calculate the relative permeability in all three directions.
Step3: Once the algorithm is run, the output can either be a table of values or a graph showing the relative permeability curves of the phase(s). Here we call both of those methods to see the outputs.
Step4: In order to get the relative permeability curves of both phases, we need to pass the defending phase as an argument to the algorithm.
Step5: The algorithm can also find the relative permeabilities of the phase(s) in user-defined flow direction(s). It overwrites the flow inlets/outlets for the user-defined direction, then calculates the relative permeability through the other directions from the default settings. This is illustrated as follows.
|
3,707
|
<ASSISTANT_TASK:>
Python Code:
import os
from urllib.request import urlretrieve
import pybel
import logging
logging.getLogger('pybel').setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('urllib3').setLevel(logging.WARNING)
print(pybel.get_version())
DESKTOP_PATH = os.path.join(os.path.expanduser('~'), 'Desktop')
manager = pybel.Manager(f'sqlite:///{DESKTOP_PATH}/pybel_example_database.db')
url = 'https://raw.githubusercontent.com/pharmacome/conib/master/hbp_knowledge/tau/boland2018.bel'
boland_2018_graph = pybel.from_bel_script_url(url, manager=manager)
pybel.to_database(boland_2018_graph, manager=manager)
boland_2018_graph.summarize()
url = 'https://raw.githubusercontent.com/pharmacome/conib/master/hbp_knowledge/tau/caballero2018.bel'
path = os.path.join(DESKTOP_PATH, 'caballero2018.bel')
if not os.path.exists(path):
urlretrieve(url, path)
cabellero_2018_graph = pybel.from_bel_script(path, manager=manager)
cabellero_2018_graph.summarize()
pybel.to_database(cabellero_2018_graph, manager=manager)
combined_graph = pybel.union([boland_2018_graph, cabellero_2018_graph])
combined_graph.summarize()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we'll download and parse a BEL document from the Human Brain Pharmacome project describing the 2018 paper from Boland et al., "Promoting the clearance of neurotoxic proteins in neurodegenerative disorders of ageing".
Step2: A BEL document can be downloaded and parsed from a URL using pybel.from_bel_script_url. Keep in mind, the first time we load a given BEL document, various BEL resources that are referenced in the document must be cached. Be patient - this can take up to ten minutes.
Step3: The graph is loaded into an instance of the pybel.BELGraph class. We can use the pybel.BELGraph.summarize() to print a brief summary of the graph.
Step4: Next, we'll open and parse a BEL document from the Human Brain Pharmacome project describing the 2018 paper from Cabellero et al., "Interplay of pathogenic forms of human tau with different autophagic pathways". This example uses urlretrieve() to download the file locally to demonstrate how to load from a local file path.
Step5: A BEL document can also be parsed from a path to a file using pybel.from_bel_script. Like before, we will summarize the graph after parsing it.
Step6: We can combine two or more graphs in a list using pybel.union.
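As an illustrative follow-up (not in the original notebook), the merged graph can be cached in the same database the individual graphs were stored in, reusing the manager created above:
pybel.to_database(combined_graph, manager=manager)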
|
3,708
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import SVG
SVG('oneport_tiered_calibration/images/boxDiagram.svg')
SVG('oneport_tiered_calibration/images/probe.svg')
ls oneport_tiered_calibration/
ls oneport_tiered_calibration/tier1/
ls oneport_tiered_calibration/tier1/measured/
from skrf.calibration import OnePort
import skrf as rf
%matplotlib inline
from pylab import *
rf.stylely()
tier1_ideals = rf.read_all_networks('oneport_tiered_calibration/tier1/ideals/')
tier1_measured = rf.read_all_networks('oneport_tiered_calibration/tier1/measured/')
tier1 = OnePort(measured = tier1_measured,
ideals = tier1_ideals,
name = 'tier1',
sloppy_input=True)
tier1
tier2_ideals = rf.read_all_networks('oneport_tiered_calibration/tier2/ideals/')
tier2_measured = rf.read_all_networks('oneport_tiered_calibration/tier2/measured/')
tier2 = OnePort(measured = tier2_measured,
ideals = tier2_ideals,
name = 'tier2',
sloppy_input=True)
tier2
tier1.error_ntwk.plot_s_db()
title('Tier 1 Error Network')
tier2.error_ntwk.plot_s_db()
title('Tier 2 Error Network')
dut = tier1.error_ntwk.inv ** tier2.error_ntwk
dut.name = 'probe'
dut.plot_s_db()
title('Probe S-parameters')
ylim(-60,10)
ls probe*
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This notebook will demonstrate how to use skrf to do a two-tiered one-port calibration. We'll use data that was taken to characterize a waveguide-to-CPW probe. So, for this specific example the diagram above looks like
Step2: Some Data
Step3: (if you dont have the git repo for these examples, the data for this notebook can be found here)
Step4: The first tier is at the waveguide interface, and consisted of the following set of standards
Step5: Creating Calibrations
Step6: Because we saved corresponding ideal and measured standards with identical names, the Calibration will automatically align our standards upon initialization. (More info on creating Calibration objects this can be found in the docs.)
Step7: Error Networks
Step8: Similarly for tier 2,
Step9: De-embedding the DUT
Step10: You may want to save this to disk, for future use,
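A minimal sketch of how that could be done (the filename is illustrative; Network.write saves skrf's native pickled .ntwk format, while write_touchstone saves a standard Touchstone file):
dut.write('probe.ntwk')
dut.write_touchstone('probe')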
|
3,709
|
<ASSISTANT_TASK:>
Python Code:
def f(x): return np.sin(x) # Функция
def dfdx(x): return np.cos(x) # и ее производная.
x0 = 1 # Точка, в которой производится дифференциирование.
dx = np.logspace(-16, 0, 100) # Приращения аргумента.
# Найдем приращения функции
df = f(x0+dx)-f(x0)
# и оценим производные.
approx_dfdx = df/dx
# Вычислим точное значение производной
exact_dfdx = dfdx(x0)
# и вычислим относительные погрешности.
relative_error = np.abs(1.0-approx_dfdx/exact_dfdx)
# Строим график зависимости погрешности от приращения.
plt.loglog(dx, relative_error)
plt.xlabel("Приращение аргумента")
plt.ylabel("Относительная погрешность")
plt.show()
def experiment(method, f=np.sin, dfdx=np.cos, x0=1, dx = np.logspace(-16, 0, 100)):
"""Оценивает производную `f` с помощью функции `method`, сравнивает со значением аналитической
производной `dfdx` и строит график относительной ошибки от приращения аргумента.
Оценка производной производится функцией `method(f, x0, dx)`, принимающей на вход
функцию `f`, которая дифференцируется в точке `x0`, используя приращения `dx`;
функция `method` возвращает вектор значений производной на всех переданных приращениях `dx`.
"""
approx_dfdx = method(f, x0, dx) # Оценка производных.
exact_dfdx = dfdx(x0) # Точное значение производной.
relative_error = np.abs(1.0-approx_dfdx/exact_dfdx) # Относительные погрешности.
plt.loglog(dx, relative_error, label=method.__name__)
plt.xlabel("Приращение аргумента")
plt.ylabel("Относительная погрешность")
return relative_error
def forward_divided_difference(f, x0, dx):
"""Прямая разделенная разность."""
return (f(x0+dx)-f(x0))/dx
def backward_divided_difference(f, x0, dx):
"""Обратная разделенная разность."""
return (f(x0)-f(x0-dx))/dx
def central_divided_difference(f, x0, dx):
"""Центральная разделенная разность."""
return (f(x0+dx/2)-f(x0-dx/2))/dx
# Строим график зависимости погрешности от приращения.
experiment(forward_divided_difference)
experiment(backward_divided_difference)
experiment(central_divided_difference)
plt.legend()
plt.show()
class AG:
def __init__(self, v, d):
"""Инициализирует пару (f, df/dx) = (v, d)."""
self.v = v
self.d = d
# Представление констант
@staticmethod
def const(x):
return AG(x, 1)
# Арифметические операции
def __add__(self, other):
return AG(self.v+other.v, self.d+other.d)
def __sub__(self, other):
return AG(self.v-other.v, self.d-other.d)
def __mul__(self, other):
return AG(self.v*other.v, self.d*other.v+self.v*other.d)
def __truediv__(self, other):
return AG(self.v/other.v, (self.d*other.v-self.v*other.d)/(other.v**2) )
# Возведение в степень
def __pow__(self, other):
return AG(np.power(self.v, other.v), np.power(self.v,other.v-1.)*other.v*self.d
+ np.power(self.v,other.v)*np.log(self.v)*other.d )
# Основные функции
@staticmethod
def sin(x):
return AG(np.sin(x.v), np.cos(x.v)*x.d)
@staticmethod
def cos(x):
return AG(np.cos(x.v), -np.sin(x.v)*x.d)
@staticmethod
def log(x):
return AG(np.log(x.v), x.d/x.v)
x = AG.const(3)
y = x*x/x
print(f"y({x.v})={y.v} y'({x.v})={y.d}")
# Compare automatic differentiation against the other approaches.
# A complicated function
def f(x): return x**AG.sin(x**AG.cos(x))
# and its even more complicated analytic derivative
def dfdx(x):
return x**AG.sin(x**AG.cos(x))*(
x**AG.cos(x)*AG.cos(x**AG.cos(x))*AG.log(x)*(AG.cos(x)/x - AG.log(x)*AG.sin(x))
+ AG.sin(x**AG.cos(x))/x
)
# Points at which the derivative is estimated.
x0 = np.linspace(1,10,100)
# Step for the finite difference.
h = 1e-8
# Derivative estimate via the central divided difference.
divided_difference = ( f(AG.const(x0+h/2)).v - f(AG.const(x0-h/2)).v )/h
# Analytic answer.
analytic = dfdx( AG.const(x0) ).v
# Automatic differentiation.
autograd = f( AG.const(x0) ).d
def abs_err(x, y):
    """Computes the absolute error."""
    return np.abs(x-y)
# Compare the three results against each other.
plt.semilogy(x0, abs_err(divided_difference, analytic), label="DD - A")
plt.semilogy(x0, abs_err(divided_difference, autograd), '.', label="DD - AG")
plt.semilogy(x0, abs_err(autograd, analytic), label="AG - A")
plt.legend()
plt.show()
# Compare the errors of the forward and central divided differences on a grid.
def f(x): return np.sin(x**2)        # The function
def dfdx(x): return 2*x*np.cos(x**2) # and its derivative
# Define the grid
xk = np.linspace(0,10,1000)
# Evaluate the function on it
fk = f(xk)
# Approximate values of the derivative:
central_dfdx = np.empty_like(xk); central_dfdx[:] = np.nan
central_dfdx[1:-1] = (fk[2:]-fk[:-2])/(xk[2:]-xk[:-2])
forward_dfdx = np.empty_like(xk); forward_dfdx[:] = np.nan
forward_dfdx[:-1] = (fk[1:]-fk[:-1])/(xk[1:]-xk[:-1])
# Exact values of the derivative
exact_dfdx = dfdx(xk)
yk = (xk[1:]+xk[:-1])/2 # The shifted (staggered) grid.
shifted_dfdx = (fk[1:]-fk[:-1])/(xk[1:]-xk[:-1]) # Central divided difference estimate.
exact_shifted = dfdx(yk) # Exact values on the shifted grid
plt.semilogy(xk, abs_err(central_dfdx, exact_dfdx), label="central")
plt.semilogy(xk, abs_err(forward_dfdx, exact_dfdx), label="forward")
plt.semilogy(yk, abs_err(shifted_dfdx, exact_shifted), label="shifted grid")
plt.xlabel("Grid node")
plt.ylabel("Absolute error")
plt.legend()
plt.show()
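# Aside (an added check): central differences are 2nd order, forward 1st order;
# halving the step should shrink their errors by ~4x and ~2x respectively.
for h in (1e-2, 5e-3):
    fwd = (f(1+h)-f(1))/h
    cen = (f(1+h/2)-f(1-h/2))/h
    print(h, abs(fwd-dfdx(1)), abs(cen-dfdx(1)))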
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: As we can see, the error does not tend to zero; it reaches its minimum at step sizes around $10^{-8}$ and then grows again.
Step8: Exercise
Step9: The finite-difference approximation gives an expectedly larger error. The analytic formula and automatic differentiation give very similar, yet still slightly different, results.
|
3,710
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -q tflite-model-maker
import numpy as np
import os
from tflite_model_maker import model_spec
from tflite_model_maker import text_classifier
from tflite_model_maker.config import ExportFormat
from tflite_model_maker.text_classifier import AverageWordVecSpec
from tflite_model_maker.text_classifier import DataLoader
import tensorflow as tf
assert tf.__version__.startswith('2')
tf.get_logger().setLevel('ERROR')
data_dir = tf.keras.utils.get_file(
fname='SST-2.zip',
origin='https://dl.fbaipublicfiles.com/glue/data/SST-2.zip',
extract=True)
data_dir = os.path.join(os.path.dirname(data_dir), 'SST-2')
import pandas as pd
def replace_label(original_file, new_file):
# Load the original file to pandas. We need to specify the separator as
# '\t' as the training data is stored in TSV format
df = pd.read_csv(original_file, sep='\t')
# Define how we want to change the label name
label_map = {0: 'negative', 1: 'positive'}
  # Execute the label change
df.replace({'label': label_map}, inplace=True)
# Write the updated dataset to a new file
df.to_csv(new_file)
# Replace the label name for both the training and test dataset. Then write the
# updated CSV dataset to the current folder.
replace_label(os.path.join(os.path.join(data_dir, 'train.tsv')), 'train.csv')
replace_label(os.path.join(os.path.join(data_dir, 'dev.tsv')), 'dev.csv')
spec = model_spec.get('average_word_vec')
train_data = DataLoader.from_csv(
filename='train.csv',
text_column='sentence',
label_column='label',
model_spec=spec,
is_training=True)
test_data = DataLoader.from_csv(
filename='dev.csv',
text_column='sentence',
label_column='label',
model_spec=spec,
is_training=False)
model = text_classifier.create(train_data, model_spec=spec, epochs=10)
loss, acc = model.evaluate(test_data)
model.export(export_dir='average_word_vec')
mb_spec = model_spec.get('mobilebert_classifier')
train_data = DataLoader.from_csv(
filename='train.csv',
text_column='sentence',
label_column='label',
model_spec=mb_spec,
is_training=True)
test_data = DataLoader.from_csv(
filename='dev.csv',
text_column='sentence',
label_column='label',
model_spec=mb_spec,
is_training=False)
model = text_classifier.create(train_data, model_spec=mb_spec, epochs=3)
model.summary()
loss, acc = model.evaluate(test_data)
model.export(export_dir='mobilebert/')
model.export(export_dir='mobilebert/', export_format=[ExportFormat.LABEL, ExportFormat.VOCAB])
accuracy = model.evaluate_tflite('mobilebert/model.tflite', test_data)
print('TFLite model accuracy: ', accuracy)
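# Aside (an added sketch): the exported model can also be loaded with the
# generic TFLite interpreter to inspect its I/O signature; real apps would use
# the Task Library's BertNLClassifier instead, as noted above.
interpreter = tf.lite.Interpreter(model_path='mobilebert/model.tflite')
interpreter.allocate_tensors()
print(interpreter.get_input_details()[0]['shape'])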
new_model_spec = model_spec.get('mobilebert_classifier')
new_model_spec.seq_len = 256
new_model_spec = AverageWordVecSpec(wordvec_dim=32)
new_train_data = DataLoader.from_csv(
filename='train.csv',
text_column='sentence',
label_column='label',
model_spec=new_model_spec,
is_training=True)
model = text_classifier.create(new_train_data, model_spec=new_model_spec)
model = text_classifier.create(new_train_data, model_spec=new_model_spec, epochs=20)
new_test_data = DataLoader.from_csv(
filename='dev.csv',
text_column='sentence',
label_column='label',
model_spec=new_model_spec,
is_training=False)
loss, accuracy = model.evaluate(new_test_data)
spec = model_spec.get('bert_classifier')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Text classification with TensorFlow Lite Model Maker
Step2: Import the required packages.
Step3: Download the sample training data.
Step4: The SST-2 dataset is stored in TSV format. The only difference between TSV and CSV is that TSV uses a tab (\t) character as its delimiter instead of the comma (,) used in the CSV format.
Step5: Quickstart
Step6: Model Maker also supports other model architectures such as BERT. If you are interested in learning about other architectures, see the Choose a model architecture for Text Classifier section below.
Step 2. Load the training and test data, then preprocess them according to a specific model_spec.
Step7: Step 3. Train the TensorFlow model with the training data.
Step8: Step 4. Evaluate the model with the test data.
Step9: Step 5. Export as a TensorFlow Lite model.
Step10: You can download the TensorFlow Lite model file using the left sidebar of Colab. Go into the average_word_vec folder, as specified in the export_dir parameter above, right-click on the model.tflite file and choose Download to download it to your local computer.
Step11: Load training data
Step12: The Model Maker library also supports the from_folder() method to load data. It assumes that the text data of the same class are in the same subdirectory and that the subfolder name is the class name. Each text file contains one movie review sample. The class_labels parameter is used to specify which subfolders to load.
Step13: Examine the detailed model structure.
Step14: Evaluate the model
Step15: Export as a TensorFlow Lite model
Step16: The TensorFlow Lite model file can be integrated in a mobile app using the BertNLClassifier API in TensorFlow Lite Task Library. Please note that this is different from the NLClassifier API used to integrate the text classification trained with the average word vector model architecture.
Step17: You can evaluate the TFLite model with the evaluate_tflite method to measure its accuracy. Converting the trained TensorFlow model to TFLite format and applying quantization can affect its accuracy, so it is recommended to evaluate the TFLite model's accuracy before deployment.
Step18: Advanced Usage
Step19: Customize the average word embedding model hyperparameters
Step20: Get the preprocessed data.
Step21: Train the new model.
Step22: Tune the training hyperparameters
Step23: Evaluate the newly retrained model with 20 training epochs.
Step24: Change the Model Architecture
|
3,711
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.5
import math
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
print(tf.__version__)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
df = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep=",")
df.head()
df.describe()
np.random.seed(seed=1) #makes result reproducible
msk = np.random.rand(len(df)) < 0.8
traindf = df[msk]
evaldf = df[~msk]
def add_more_features(df):
# TODO: Add more features to the dataframe
return df
# Create pandas input function
def make_input_fn(df, num_epochs):
return tf.compat.v1.estimator.inputs.pandas_input_fn(
x = add_more_features(df),
y = df['median_house_value'] / 100000, # will talk about why later in the course
batch_size = 128,
num_epochs = num_epochs,
shuffle = True,
queue_capacity = 1000,
num_threads = 1
)
# Define your feature columns
def create_feature_cols():
return [
tf.feature_column.numeric_column('housing_median_age')
# TODO: Define additional feature columns
# Hint: Are there any features that would benefit from bucketizing?
]
# Create estimator train and evaluate function
def train_and_evaluate(output_dir, num_train_steps):
  # TODO: Create tf.estimator.LinearRegressor, train_spec, eval_spec, and train_and_evaluate using your feature columns
  pass  # placeholder so the cell runs before the TODO is filled in
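# One possible solution sketch for the TODO above (an assumption, not the
# official lab answer); it reuses make_input_fn and create_feature_cols:
def train_and_evaluate(output_dir, num_train_steps):
  estimator = tf.compat.v1.estimator.LinearRegressor(
      model_dir = output_dir, feature_columns = create_feature_cols())
  train_spec = tf.estimator.TrainSpec(
      input_fn = make_input_fn(traindf, None), max_steps = num_train_steps)
  eval_spec = tf.estimator.EvalSpec(input_fn = make_input_fn(evaldf, 1))
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)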
OUTDIR = './trained_model'
# Run the model
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
tf.compat.v1.summary.FileWriterCache.clear()
train_and_evaluate(OUTDIR, 2000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we'll load our data set.
Step2: Examine and split the data
Step3: Now, split the data into two parts -- training and evaluation.
Step4: Training and Evaluation
|
3,712
|
<ASSISTANT_TASK:>
Python Code:
import time
import timeit
currentTime = time.gmtime()
print(currentTime)
time.strftime("%a, %d %b %Y %H:%M:%S +0000", currentTime)
type(currentTime)
currentTime.tm_hour
def parseTime(timeObj):
'''parseTime:
takes time.struct_time instances
:return time displayed as string - year month day hour min sec'''
return (str(timeObj.tm_year) + str(timeObj.tm_mon) + str(timeObj.tm_mday) +
str(timeObj.tm_hour) + str(timeObj.tm_min) + str(timeObj.tm_sec))
parseTime(currentTime)
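# Aside (an added example): time.strftime gives the same timestamp but
# zero-padded, which keeps the string length fixed (parseTime above drops
# leading zeros).
time.strftime("%Y%m%d%H%M%S", currentTime)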
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: UTC and local time
Step2: As you can see, the returned values are not very interesting or useful themselves. Lets parse structure into more useful forms. <br>
Step3: Now lets parse it into a computer-friendly form. To start with, we should understand what type of structure <code>currentTime</code> is.
Step4: As we can see, <code>currentTime</code> is an instance of a class, <code>struct_time</code> as is defined in the <code>time</code> library.
Step5: With this knowledge, it becomes trivial to parse this data into a more usable format.
|
3,713
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
%%capture
#@title Setup Environment
# Install the latest Tensorflow version.
!pip install -q tensorflow_text
!pip install -q simpleneighbors[annoy]
!pip install -q nltk
!pip install -q tqdm
#@title Setup common imports and functions
import json
import nltk
import os
import pprint
import random
import simpleneighbors
import urllib
from IPython.display import HTML, display
from tqdm.notebook import tqdm
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
from tensorflow_text import SentencepieceTokenizer
nltk.download('punkt')
def download_squad(url):
return json.load(urllib.request.urlopen(url))
def extract_sentences_from_squad_json(squad):
all_sentences = []
for data in squad['data']:
for paragraph in data['paragraphs']:
sentences = nltk.tokenize.sent_tokenize(paragraph['context'])
all_sentences.extend(zip(sentences, [paragraph['context']] * len(sentences)))
return list(set(all_sentences)) # remove duplicates
def extract_questions_from_squad_json(squad):
questions = []
for data in squad['data']:
for paragraph in data['paragraphs']:
for qas in paragraph['qas']:
if qas['answers']:
questions.append((qas['question'], qas['answers'][0]['text']))
return list(set(questions))
def output_with_highlight(text, highlight):
output = "<li> "
i = text.find(highlight)
while True:
if i == -1:
output += text
break
output += text[0:i]
output += '<b>'+text[i:i+len(highlight)]+'</b>'
text = text[i+len(highlight):]
i = text.find(highlight)
return output + "</li>\n"
def display_nearest_neighbors(query_text, answer_text=None):
query_embedding = model.signatures['question_encoder'](tf.constant([query_text]))['outputs'][0]
search_results = index.nearest(query_embedding, n=num_results)
if answer_text:
result_md = '''
<p>Random Question from SQuAD:</p>
<p> <b>%s</b></p>
<p>Answer:</p>
<p> <b>%s</b></p>
''' % (query_text , answer_text)
else:
result_md = '''
<p>Question:</p>
<p> <b>%s</b></p>
''' % query_text
result_md += '''
<p>Retrieved sentences :
<ol>
'''
if answer_text:
for s in search_results:
result_md += output_with_highlight(s, answer_text)
else:
for s in search_results:
result_md += '<li>' + s + '</li>\n'
result_md += "</ol>"
display(HTML(result_md))
#@title Download and extract SQuAD data
squad_url = 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json' #@param ["https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"]
squad_json = download_squad(squad_url)
sentences = extract_sentences_from_squad_json(squad_json)
questions = extract_questions_from_squad_json(squad_json)
print("%s sentences, %s questions extracted from SQuAD %s" % (len(sentences), len(questions), squad_url))
print("\nExample sentence and context:\n")
sentence = random.choice(sentences)
print("sentence:\n")
pprint.pprint(sentence[0])
print("\ncontext:\n")
pprint.pprint(sentence[1])
print()
#@title Load model from tensorflow hub
module_url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3" #@param ["https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3", "https://tfhub.dev/google/universal-sentence-encoder-qa/3"]
model = hub.load(module_url)
#@title Compute embeddings and build simpleneighbors index
batch_size = 100
encodings = model.signatures['response_encoder'](
input=tf.constant([sentences[0][0]]),
context=tf.constant([sentences[0][1]]))
index = simpleneighbors.SimpleNeighbors(
len(encodings['outputs'][0]), metric='angular')
print('Computing embeddings for %s sentences' % len(sentences))
slices = zip(*(iter(sentences),) * batch_size)
num_batches = int(len(sentences) / batch_size)
for s in tqdm(slices, total=num_batches):
response_batch = list([r for r, c in s])
context_batch = list([c for r, c in s])
encodings = model.signatures['response_encoder'](
input=tf.constant(response_batch),
context=tf.constant(context_batch)
)
for batch_index, batch in enumerate(response_batch):
index.add_one(batch, encodings['outputs'][batch_index])
index.build()
print('simpleneighbors index for %s sentences built.' % len(sentences))
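# Aside (an added example): the index can also be queried with a free-form
# question of your own, mirroring what display_nearest_neighbors does above.
my_query = "When was the university founded?"
emb = model.signatures['question_encoder'](tf.constant([my_query]))['outputs'][0]
print(index.nearest(emb, n=3))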
#@title Retrieve nearest neighbors for a random question from SQuAD
num_results = 25 #@param {type:"slider", min:5, max:40, step:1}
query = random.choice(questions)
display_nearest_neighbors(query[0], query[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multilingual Universal Sentence Encoder Q&A Retrieval
Step2: Run the following code block to download and extract the SQuAD dataset into:
Step3: The following code block sets up the TensorFlow graph g and session using the question_encoder and <strong>response_encoder</strong> signatures of the <a>Universal Sentence Encoder Multilingual Q&A model</a>.
Step4: The following code block computes the embeddings for all of the text/context tuples and stores them in a simpleneighbors index using the response_encoder.
Step5: At retrieval time, the question is encoded with the question_encoder, and the question embedding is used to query the simpleneighbors index.
|
3,714
|
<ASSISTANT_TASK:>
Python Code:
# Loading metadata from training database
con = sqlite3.connect("F:/FMR/data.sqlite")
db_documents = pd.read_sql_query("SELECT * from documents", con)
db_authors = pd.read_sql_query("SELECT * from authors", con)
data = db_documents # just a handy alias
data.head()
tokenised = load_json("abstract_tokenised.json")
# Let's have a peek
tokenised["acis2001/1"][:10]
from textblob import TextBlob
non_en = [] # a list of ids of the documents in other languages
count = 0
for id_, entry in data.iterrows():
count += 1
try:
lang = TextBlob(entry["title"] + " " + entry["abstract"]).detect_language()
except:
raise
if lang != 'en':
non_en.append(id_)
print(lang, data.iloc[id_]["title"])
if (count % 100) == 0:
print("Progress: ", count)
save_pkl(non_en, "non_en.list.pkl")
non_en = load_pkl("non_en.list.pkl")
# Convert our dict-based structure to a list-based structure that Gensim can read,
# and at the same time filter out the non-English documents
tokenised_list = [tokenised[i] for i in data["submission_path"] if i not in non_en]
def remove_hyphenation(l):
return [i.replace("- ", "").replace("-", "") for i in l]
tokenised_list = [remove_hyphenation(i) for i in tokenised_list]
from nltk.stem.wordnet import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
def lemmatize(l):
return [" ".join([lemmatizer.lemmatize(token)
for token
in phrase.split(" ")])
for phrase in l]
def lemmatize_all(tokenised):
# Lemmatize the documents.
lemmatized = [lemmatize(entry) for entry in tokenised]
return lemmatized
" ".join([lemmatizer.lemmatize(token)
for token
in 'assistive technologies'.split(" ")])
tokenised_list = lemmatize_all(tokenised_list)
# In case we need it in the future
save_json(tokenised_list, "abstract_lemmatized.json")
# To load it:
tokenised_list = load_json("abstract_lemmatized.json")
from gensim.corpora import Dictionary
# Create a dictionary for all the documents. This might take a while.
dictionary = Dictionary(tokenised_list)
# Let's see what's inside, note the spelling :)
# But there is really nothing we can do with that.
dictionary[0]
len(dictionary)
# remove tokens that appear in fewer than 2 documents and tokens that appear in more than 50% of the documents.
dictionary.filter_extremes(no_below=2, no_above=0.5, keep_n=None)
len(dictionary)
# Helpers
display_limit = 10
def shorter_than(n):
bad = []
count = 0
for i in dictionary:
if len(dictionary[i]) < n:
count += 1
if count < display_limit:
print(dictionary[i])
bad.append(i)
print(count)
return bad
def if_in(symbol):
bad = []
count = 0
for i in dictionary:
if symbol in dictionary[i]:
count += 1
if count < display_limit:
print(dictionary[i])
bad.append(i)
print(count)
return bad
def more_than(symbol, n):
bad = []
count = 0
for i in dictionary:
if dictionary[i].count(symbol) > n:
count += 1
if count < display_limit:
print(dictionary[i])
bad.append(i)
print(count)
return bad
bad = shorter_than(3)
dictionary.filter_tokens(bad_ids=bad)
display_limit = 10
bad = if_in("*")
dictionary.filter_tokens(bad_ids=bad)
bad = if_in("<")
dictionary.filter_tokens(bad_ids=bad)
bad = if_in(">")
dictionary.filter_tokens(bad_ids=bad)
bad = if_in("%")
dictionary.filter_tokens(bad_ids=bad)
bad = if_in("/")
dictionary.filter_tokens(bad_ids=bad)
bad = if_in("[")
bad += if_in("]")
bad += if_in("}")
bad += if_in("{")
dictionary.filter_tokens(bad_ids=bad)
display_limit = 20
bad = more_than(" ", 3)
dictionary.filter_tokens(bad_ids=bad)
bad = if_in("- ") # verify that there is no hyphenation problem
bad = if_in("quarter")
dictionary.filter_tokens(bad_ids=bad)
names = load_json("names.json")
name_ids = [i for i, v in dictionary.iteritems() if v in names]
dictionary.filter_tokens(bad_ids=name_ids)
locations = load_json("locations.json")
location_ids = [i for i, v in dictionary.iteritems() if v in locations]
dictionary.filter_tokens(bad_ids=location_ids)
locations[:10]
names[:15] # not looking good, but it seems like it won't do much harm either
corpus = [dictionary.doc2bow(l) for l in tokenised_list]
# Save it for future usage
from gensim.corpora.mmcorpus import MmCorpus
MmCorpus.serialize("aisnet_abstract_np_cleaned.mm", corpus)
# Also save the dictionary
dictionary.save("aisnet_abstract_np_cleaned.ldamodel.dictionary")
# To load the corpus:
from gensim.corpora.mmcorpus import MmCorpus
corpus = MmCorpus("aisnet_abstract_np_cleaned.mm")
# To load the dictionary:
from gensim.corpora import Dictionary
dictionary = Dictionary.load("aisnet_abstract_np_cleaned.ldamodel.dictionary")
# Train LDA model.
from gensim.models import LdaModel
# Set training parameters.
num_topics = 150
chunksize = 2000
passes = 1
iterations = 150
eval_every = None # Don't evaluate model perplexity, takes too much time.
# Make a index to word dictionary.
print("Dictionary test: " + dictionary[0]) # This is only to "load" the dictionary.
id2word = dictionary.id2token
model = LdaModel(corpus=corpus, id2word=id2word, chunksize=chunksize, \
alpha='auto', eta='auto', \
iterations=iterations, num_topics=num_topics, \
passes=passes, eval_every=eval_every)
# Save the LDA model
model.save("aisnet_abstract_150_cleaned.ldamodel")
from gensim.models import LdaModel
model = LdaModel.load("aisnet_abstract_150_cleaned.ldamodel")
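# Aside (an added sanity check): peek at a few of the learned topics.
for topic in model.show_topics(num_topics=3, num_words=5):
    print(topic)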
import pyLDAvis.gensim
vis = pyLDAvis.gensim.prepare(model, corpus, dictionary)
pyLDAvis.display(vis)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Tokenised Full Text
Step2: Preprocessing Data for Gensim and Finetuning
Step3: Although we tried to handle these hyphenations in the previous tutorial, we still have them for some reason. The most convenient way to remove them is to strip them from the corpus and rebuild the dictionary, then re-apply our previous filter.
Step4: Lemmatization
Step5: Then we can create our lemmatized vocabulary.
Step6: Obviously we have a way too large vocabulary size. This is because the algorithm used in TextBlob's noun phrase extraction is not very robust in complicated scenarios. Let's see what we can do about this.
Step7: Now we have drastically reduced the size of the vocabulary from 2936116 to 102508. However this is not enough. For example
Step8: We have 752 such meaningless tokens in our vocabulary. Presumably this is because, during extraction of the PDFs, some mathematical equations were parsed as plain text (of course).
Step9: Removing Names & Locations
Step10: Building Corpus in Gensim Format
Step11: Train the LDA Model
Step12: Visualize the LDA Model
|
3,715
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
iris = load_iris()
# create X(features) and y(response)
X = iris.data
y = iris.target
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit(X, y)
y_pred = logreg.predict(X)
print "predicted response:\n",y_pred
len(y_pred)
from sklearn import metrics
print metrics.accuracy_score(y, y_pred)
from sklearn.neighbors import KNeighborsClassifier
knn5 = KNeighborsClassifier(n_neighbors=5)
knn5.fit(X, y)
y_pred = knn5.predict(X)
print metrics.accuracy_score(y, y_pred)
knn1 = KNeighborsClassifier(n_neighbors=1)
knn1.fit(X, y)
y_pred = knn1.predict(X)
print metrics.accuracy_score(y, y_pred)
print X.shape
print y.shape
# Step 1: split X and y into training and test sets
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=4)
# the random_state parameter seeds the pseudo-random generator with the given integer, making the sampling reproducible
print X_train.shape
print X_test.shape
print y_train.shape
print y_test.shape
# Step 2: train the model on the training data
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# Step 3: predict on the test data and compute the test accuracy
y_pred = logreg.predict(X_test)
print metrics.accuracy_score(y_test, y_pred)
# try it yourself: different train/test splits above yield different test accuracies
# K=5
knn5 = KNeighborsClassifier(n_neighbors=5)
knn5.fit(X_train, y_train)
y_pred = knn5.predict(X_test)
print metrics.accuracy_score(y_test, y_pred)
# K=1
knn1 = KNeighborsClassifier(n_neighbors=1)
knn1.fit(X_train, y_train)
y_pred = knn1.predict(X_test)
print metrics.accuracy_score(y_test, y_pred)
# test K=1 through K=25 and record the test accuracy
k_range = range(1, 26)
test_accuracy = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
test_accuracy.append(metrics.accuracy_score(y_test, y_pred))
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(k_range, test_accuracy)
plt.xlabel("Value of K for KNN")
plt.ylabel("Testing Accuracy")
# here we make a prediction for unseen data
knn11 = KNeighborsClassifier(n_neighbors=11)
knn11.fit(X, y)
knn11.predict([3, 5, 4, 2])
from sklearn.cross_validation import KFold
import numpy as np
def cv_estimate(k, kfold=5):
cv = KFold(n = X.shape[0], n_folds=kfold)
clf = KNeighborsClassifier(n_neighbors=k)
score = 0
for train, test in cv:
clf.fit(X[train], y[train])
score += clf.score(X[test], y[test])
#print clf.score(X[test], y[test])
score /= kfold
return score
# test K=1 through K=25 and record the average cross-validation accuracy
k_range = range(1, 26)
test_accuracy = []
for k in k_range:
test_accuracy.append(cv_estimate(k, 5))
print test_accuracy
plt.plot(k_range, test_accuracy)
plt.xlabel("Value of K for KNN")
plt.ylabel("Average Accuracy of Kfold CV")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Logistic regression
Step2: Classification accuracy
Step3: The above shows that 96% of the training data was predicted correctly. Here we used the same data for both training and prediction; the resulting metric is called the training accuracy.
Step4: KNN(K=1)
Step5: Above we obtained a model with 100% training accuracy, which seemingly gives the best model and parameters. But recall how the KNN algorithm works: it finds the K nearest points in the training data and outputs the majority class among them as its prediction.
Step6: The plot above illustrates what the train_test_split function does: it splits a dataset into two parts. Training and testing the model on different datasets yields a test accuracy that better estimates how the model will predict on unseen data.
Step7: Using the KNN algorithm
Step8: Can we find a better value of K?
Step9: Choosing model parameters
Step10: Summary
|
3,716
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import tweepy
import datetime
import json
import os
from pysqlite2 import dbapi2 as sqlite3
APP_KEY = ""
APP_SECRET = ""
OAUTH_TOKEN = ""
OAUTH_TOKEN_SECRET = ""
auth = tweepy.OAuthHandler(APP_KEY, APP_SECRET)
auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
api = tweepy.API(auth)
databasefile = "spatial-backup.sqlite"
conn = sqlite3.connect(databasefile)
conn.enable_load_extension(True)
conn.execute('SELECT load_extension("/usr/lib64/mod_spatialite.so.7")')
curs = conn.cursor()
def coordinates_to_wkt(coords):
if coords == None:
return ""
return "POINT("+str(coords["coordinates"][0])+" "+str(coords["coordinates"][1])+")"
def bbox_to_wkt(bbox):
if bbox.coordinates == None:
return ""
if bbox.type == "Polygon":
centroid = [0, 0]
centroid[0] = (bbox.coordinates[0][2][0] + bbox.coordinates[0][0][0]) / 2
centroid[1] = (bbox.coordinates[0][2][1] + bbox.coordinates[0][0][1]) / 2
return "POINT("+str(centroid[0])+" "+str(centroid[1])+")"
print "Unknown place type!"
return ""
def process_query(search_results):
for tweet in search_results:
full_place_name = ""
place_type = ""
location = ""
username = tweet.user.screen_name
followers_count = tweet.user.followers_count
tweettext = tweet.text.encode("utf-8")
if tweet.place != None:
full_place_name = tweet.place.full_name
place_type = tweet.place.place_type
coordinates = tweet.coordinates
if (coordinates != None) or (tweet.place != None):
print 'Found a geolocated tweet! By:'
print username
print '==========================='
if coordinates != None:
location = coordinates_to_wkt(coordinates)
else:
if tweet.place != None:
location = bbox_to_wkt(tweet.place.bounding_box)
curs.execute("insert into tweets (username, followers_count, tweettext, full_place_name, place_type, coordinates, geometry) values (?, ?, ?, ?, ?, ?, ST_GeomFromText( ? , 4326));", \
(username, followers_count, tweettext.decode('utf-8'), full_place_name, place_type, location, location))
conn.commit()
process_query(api.search(q="Beer", count=100))
process_query(api.search(q="Jorn", count=100))
process_query(api.search(q="cairo", count=100))
process_query(api.search(q="washington", count=100))
conn.close()
os.system("qgis "+databasefile)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Twitter authentication (fill this!)
Step2: Using Tweepy instead of Twython (because it's more readily available via apt-get or zypper).
Step3: Database file to write to (fill this!)
Step4: SQLite opening
Step5: Coordinates to WKT
Step6: Bounding box to WKT
Step7: Process query
Step8: Example queries
Step9: Close the database
Step10: Visualise data
|
3,717
|
<ASSISTANT_TASK:>
Python Code:
# Import Node and Function module
from nipype import Node, Function
# Create a small example function
def add_two(x_input):
return x_input + 2
# Create Node
addtwo = Node(Function(input_names=["x_input"],
output_names=["val_output"],
function=add_two),
name='add_node')
addtwo.inputs.x_input = 4
addtwo.run()
addtwo.result.outputs
from nipype import Node, Function
# Create the Function object
def get_random_array(array_shape):
# Import random function
from numpy.random import random
return random(array_shape)
# Create Function Node that executes get_random_array
rndArray = Node(Function(input_names=["array_shape"],
output_names=["random_array"],
function=get_random_array),
name='rndArray_node')
# Specify the array_shape of the random array
rndArray.inputs.array_shape = (3, 3)
# Run node
rndArray.run()
# Print output
print(rndArray.result.outputs)
from nipype import Node, Function
# Import random function
from numpy.random import random
# Create the Function object
def get_random_array(array_shape):
return random(array_shape)
# Create Function Node that executes get_random_array
rndArray = Node(Function(input_names=["array_shape"],
output_names=["random_array"],
function=get_random_array),
name='rndArray_node')
# Specify the array_shape of the random array
rndArray.inputs.array_shape = (3, 3)
# Run node
rndArray.run()
# Print output
print(rndArray.result.outputs)
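# Aside (an added note): this second version fails when the node runs, because
# the function's source is executed in a fresh namespace where 'random' was
# never imported. Keep imports inside the function body (nipype's Function
# interface also accepts an 'imports' argument for this purpose).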
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Trap 1
Step2: Now, let's see what happens if we move the import of random outside the scope of get_random_array
|
3,718
|
<ASSISTANT_TASK:>
Python Code:
s_pattern = 4000 # number of data points in the pattern
t = np.arange(s_pattern)*0.001 # time points for the elements in the pattern
D = 2
pattern1 = np.vstack([np.sin(t*np.pi), np.cos(t*np.pi)]).T
pattern2 = np.vstack([np.sin(t*np.pi), -np.sin(t*np.pi)]).T
plt.subplot(1, 2, 1)
plt.plot(t, pattern1)
plt.title('pattern 1')
plt.subplot(1, 2, 2)
plt.plot(t, pattern2)
plt.title('pattern 2')
plt.show()
net = nengo.Network()
with net:
process = nengo.processes.WhiteSignal(period=100., high=3., y0=0)
rw = []
for i in range(D):
rw.append(nengolib.networks.RollingWindow(theta=0.5, n_neurons=3000, process=process, neuron_type=nengo.LIFRate()))
s_window = 500
t_window = np.linspace(0, 1, s_window)
inv_basis = rw[0].inverse_basis(t_window)
eval_points = []
target = []
for i in range(s_pattern):
eval_points.append(np.dot(inv_basis, np.roll(pattern1, i)[:s_window]).T)
eval_points.append(np.dot(inv_basis, np.roll(pattern2, i)[:s_window]).T)
target.append([1])
target.append([-1])
eval_points = np.array(eval_points).reshape(len(eval_points), -1)
with net:
pool = nengo.Ensemble(n_neurons=3000, dimensions=eval_points.shape[1],
neuron_type=nengo.LIFRate(), seed=1)
start = 0
for r in rw:
nengo.Connection(r.state, pool[start:start+r.state.size_out])
start += r.state.size_out
result = nengo.Node(None, size_in=1)
dec_conn = nengo.Connection(pool, result,
eval_points=eval_points, scale_eval_points=False,
function=target, synapse=0.1)
model = nengo.Network()
model.networks.append(net)
with model:
offsets = [-np.pi/2, np.pi]
def stim_func(t):
offset = offsets[int(t/5) % len(offsets)]
return np.sin(t*np.pi), np.sin(t*np.pi+offset)
ideal_results = [1.0, -1.0]
def ideal_func(t):
return ideal_results[int(t/5) % len(ideal_results)]
ideal_result = nengo.Node(ideal_func)
stim = nengo.Node(stim_func)
for i in range(D):
nengo.Connection(stim[i], rw[i].input, synapse=None)
p_result = nengo.Probe(result)
p_stim = nengo.Probe(stim)
p_pool = nengo.Probe(pool)
p_ideal = nengo.Probe(ideal_result)
sim = nengo.Simulator(model)
sim.run(10)
plt.plot(sim.trange(), sim.data[p_stim], label='input')
plt.plot(sim.trange(), sim.data[p_result], label='output (rmse:%1.3f)' % np.sqrt(np.mean((sim.data[p_ideal]-sim.data[p_result])**2)))
plt.legend(loc='best')
model = nengo.Network()
model.networks.append(net)
with model:
offsets = [-np.pi/2, np.pi]
def stim_func(t):
offset = offsets[int(t/5) % len(offsets)]
return np.sin(t*np.pi), np.sin(t*np.pi+offset)
stim = nengo.Node(stim_func)
for i in range(D):
nengo.Connection(stim[i], rw[i].input, synapse=None)
result2 = nengo.Node(None, size_in=1)
nengo.Connection(pool, result2, eval_points=sim.data[p_pool], function=sim.data[p_ideal], scale_eval_points=False)
p_result = nengo.Probe(result)
p_result2 = nengo.Probe(result2)
p_stim = nengo.Probe(stim)
sim2 = nengo.Simulator(model)
sim2.run(10)
plt.plot(sim2.trange(), sim2.data[p_stim], label='input')
plt.plot(sim2.trange(), sim2.data[p_result], label='output (rmse:%1.3f)' % np.sqrt(np.mean((sim.data[p_ideal]-sim2.data[p_result])**2)))
plt.plot(sim2.trange(), sim2.data[p_result2], label='trained (rmse:%1.3f)' % np.sqrt(np.mean((sim.data[p_ideal]-sim2.data[p_result2])**2)))
plt.legend(loc='best')
model = nengo.Network()
model.networks.append(net)
with model:
result2 = nengo.Node(None, size_in=1)
conn = nengo.Connection(pool, result2, eval_points=sim.data[p_pool], function=sim.data[p_ideal], scale_eval_points=False)
import nengo_encoder_learning
error = nengo_encoder_learning.improve(conn, learning_rate=1e-2, steps=10)
plt.plot(error)
model = nengo.Network()
model.networks.append(net)
with model:
offsets = [-np.pi/2, np.pi]
def stim_func(t):
offset = offsets[int(t/5) % len(offsets)]
return np.sin(t*np.pi), np.sin(t*np.pi+offset)
stim = nengo.Node(stim_func)
for i in range(D):
nengo.Connection(stim[i], rw[i].input, synapse=None)
result2 = nengo.Node(None, size_in=1)
nengo.Connection(pool, result2, eval_points=sim.data[p_pool], function=sim.data[p_ideal], scale_eval_points=False)
p_result = nengo.Probe(result)
p_result2 = nengo.Probe(result2)
p_stim = nengo.Probe(stim)
sim2 = nengo.Simulator(model)
sim2.run(10)
plt.plot(sim2.trange(), sim2.data[p_stim], label='input')
plt.plot(sim2.trange(), sim2.data[p_result], label='output (rmse:%1.3f)' % np.sqrt(np.mean((sim.data[p_ideal]-sim2.data[p_result])**2)))
plt.plot(sim2.trange(), sim2.data[p_result2], label='trained (rmse:%1.3f)' % np.sqrt(np.mean((sim.data[p_ideal]-sim2.data[p_result2])**2)))
plt.legend(loc='best')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's create a network that represents a rolling window in time (Aaron's "delay network"). The process determines what sort of pattern the network will be optimized for -- here we just go with white noise of a maximum of 3Hz. theta determines how big the rolling window is -- here we use 0.5 seconds.
Step2: Now we need to create the training data for decoding out of the rolling window. Our patterns are larger than the rolling window, so to create our training data we will take our patterns, shift them, and cut them down to the right size. In order to then give that to nengo, we also need to project from the window's space to the internal representation space (using the inv_basis).
Step3: Now we can create a connection optimized to do this decoding
Step4: Let's try feeding in those two patterns and see what the response is
Step5: Yay it works! Still a pretty high RMSE, though. Can we improve that?
Step6: That's some improvement! What else can we do?
Step7: That looks like it should improve things! How well does it do?
|
3,719
|
<ASSISTANT_TASK:>
Python Code:
my_variable = 10
print(my_variable)
a = 10
b = 15
print(a + b)
import this
print('Entering the for loop:\n')
for count in range(10):
print(count)
print('Still in the for loop.')
print("\nNow I'm done with the for loop.")
thing_1 = 47 # define an int object
print(thing_1.__doc__)
thing_1 = 'blah' # reassign thing_1 to an string object
print(thing_1.__doc__)
dir(thing_1)
thing_2 = 'abcd'
thing_3 = thing_1.__add__(thing_2)
print(thing_3)
thing_4 = thing_1 + thing_2
print(thing_4)
print(thing_3 == thing_4)
int_1 = 11
int_2 = 22
sum_1 = int_1.__add__(int_2)
sum_2 = int_1 + int_2
print(sum_1)
print(sum_2)
print(sum_1 == sum_2)
thing_1 + int_1
bool_1 = True
type(bool_1)
dir(bool_1)
bool_2 = False
bool_1 == bool_2
bool_1 + bool_2
bool_1 and bool_2
type(bool_1 * bool_2)
int_1 = 2
type(int_1)
dir(int_1)
int_2 = 3
print(int_1 - int_2)
int_1.__pow__(int_2)
int_1 ** int_2
int_1 / int_2
type(int_1 / int_2)
int_1.__truediv__(int_2)
int_1.__divmod__(int_2)
float_1 = 23.46
type(float_1)
dir(float_1)
float_2 = 3.
float_1 / float_2
int_1 < int_2
float_1 >= int_2
float_1 == float_2
str_1 = 'hello'
type(str_1)
dir(str_1)
a = 'Hi'
print(a*5)
# count the number of occurances of a sub-string
"Hi there I'm Nick".count('i')
# Find the next index of a substring
"Hi there I'm Nick".find('i')
"Hi there I'm Nick".find('i', 2)
# Insert variables into a string
digit = 7
'The digit "7" should appear at the end of this sentence: {}.'.format(digit)
another_digit = 15
'This sentence will have two digits at the end: {} and {}.'.format(digit, another_digit)
# Replace a sub-string with another sub-string
my_sentence = "Hi there I'm Nick"
my_sentence.replace('e', 'E')
my_sentence.replace('N', '')
my_list = ['a', 'b', 'c', 'a']
len(my_list)
my_list.append(1)
print(my_list)
my_list[0] # indexing in Python starts at 0!
my_list[4]
my_list[-1] # negative indexes count backward from the end of the list
type(my_list[0])
type(my_list[-1])
# let's do something crazy
my_list.append(my_list)
type(my_list[-1])
my_list
my_list[-1]
my_list[-1][-1]
my_list[-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1][-1]
my_list = ['a', 'b', 1]
my_list[0] = 'c'
my_list
my_list.remove(1)
my_list
my_tuple = ('a', 'b', 1, 'a')
my_tuple[2]
my_tuple[0] = 'c'
my_tuple.append('c')
my_tuple.remove(1)
my_set = {'a', 'b', 1, 'a'}
print(my_set) # note that order
my_set.add('c')
print(my_set)
my_set[0]
my_list = ['a', 'a', 'a', 'a', 'b', 'b', 'b']
my_list
my_set = set(my_list)
len(my_set)
my_list = list(range(1000000)) # list of numbers 0 - 999,999
my_set = set(my_list)
%%timeit
999999 in my_list
%%timeit
999999 in my_set
my_dict = {'name': 'Nick',
'birthday': 'July 13',
'years_in_durham': 4}
my_dict['name']
my_dict['years_in_durham']
my_dict['favorite_restaurant'] = 'Mateo'
my_dict['favorite_restaurant']
my_dict['age'] # hey, that's personal. Also, it's not a key in the dictionary.
my_dict.keys()
my_dict.values()
x = 3
if x < 2:
print('x less than 2')
elif x < 4:
print('x less than 4, greater than or equal to 2')
else:
print('x greater than or equal to 4')
my_list = ['a', 'b', 'c']
for element in my_list:
print(element)
for i in range(5): # iterate over all integers (starting at 0) less than 5
print(i)
for i in range(2, 6, 3): # iterate over integers (starting at 2) less than 6, increasing by 3
print(i)
my_list = ['a', 'b', 'c']
idx = 0
while idx < len(my_list):
print(my_list[idx])
idx += 1
my_list = ['a', 'b', 'c']
for element in my_list:
print(element)
my_list = range(1000000)
%%timeit
sum(my_list)
%%timeit
my_sum = 0
for element in my_list:
my_sum += element
my_sum
def double_it(x):
return x * 2
double_it(5)
double_it('hello') # remember 'hello' * 2 from before?
double_it({'a', 'b'}) # but there's no notion of multiplication for sets
def multiply_them(x, y, extra_arg1=None, extra_arg2=None):
if extra_arg1 is not None:
print(extra_arg1)
if extra_arg2 is not None:
print(extra_arg2)
print('multiplying {} and {}...'.format(x, y))
return x * y
multiply_them(3, 5)
multiply_them(3, 5, extra_arg1='hello')
multiply_them(3, 5, extra_arg2='world', extra_arg1='hello')
multiply_them(extra_arg2='world', extra_arg1='hello', 3, 5)
import random # create (pseudo-) random numbers
random.random()
import math # common mathematical functions that aren't built into base Python
print(math.factorial(5))
math.log10(100)
import statistics # some basic summary statistics
my_list = [1, 2, 3, 4, 5]
statistics.mean(my_list)
statistics.median(my_list)
statistics.stdev(my_list)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ... and then access that variable in a later cell
Step2: We'll be using Jupyter notebooks extensively in this class. I'll give a more detailed introduction during the first class, but for now, the most important thing is to understand how to run code in the notebook.
Step3: You can also edit the text in markdown cells. To display the editable, raw markdown in a text cell, double click on the cell. You can now put your cursor in the cell and edit it directly. When you're done editing, press Shift+Enter to render the cell into a more readable format.
Step4: Python syntax should be easy to write, but most importantly, well-written Python code should be easy to read. Code that follows these norms is called Pythonic. We'll touch a bit more on what it means to write Pythonic code in class.
Step5: Note that there is no explicit symbol or keyword that defines the scope of code executed during each iteration - it's the indentation that defines the scope of the loop. When you define a function or class, or write a control structure like a for look or if statement, you should indent the next line (4 spaces is customary). Each subsequent line at that same level of indentation is considered part of the scope. You only escape the scope when you return to the previous level of indentation.
Step6: To learn more about what attributes and methods a given object has, you can call dir(my_object)
Step7: That's interesting - it looks like the string object has a method called __add__. Let's see what it does -
Step8: So calling __add__ with two strings creates a new string that is the concatenation of the two originals. As an aside, there are a lot more methods we can call on strings - split, upper, find, etc. We'll come back to this.
Step9: Any object you can add to another object in Python has an __add__ method. With integer addition, this works exactly as we would expect
Step10: But it's unclear what to do when someone tries to add an int to a str
Step11: Data types
Step12: Integers
Step13: One change from Python 2 to Python 3 is the default way that integers are divided. In Python 2, the result of 2/3 is 0, the result of 4/3 is 1, etc. In other words, dividing integers in Python 2 always returned an integer with any remainder truncated. In Python 3, the result of the division of integers is always a float, with a decimal approximation of the remainder included. For example
Step14: Floats
Step15: With ints and floats, we can also do comparison operators like in other languages
Step16: Strings
Step17: We already saw that the + operator concatenates two strings. Generalizing from this, what do you expect the * operator to do?
Step18: There are a number of very useful methods built into Python str objects. A few that you might find yourself needing to use when dealing with text data include
Step19: There are plenty more useful string functions - use either the dir() function or Google to learn more about what's available.
Step20: To access individual list elements by their position, use square brackets
Step21: Lists can hold arbitrary objects!
Step22: Lists are also mutable objects, meaning that any part of them can be changed at any time. This makes them very flexible objects for storing data in a program.
Step23: Tuples
Step24: Sets
Step25: Note above that the order of items in a set doesn't have the same meaning as in lists and tuples.
Step26: Sets are used for a couple reasons. Sometimes, finding the number of unique items in a list or tuple is important. In this case, we can convert the list/tuple to a set, then call len on the new set. For example,
Step27: The other reason is that the in keyword for testing a collection for membership of an object is much faster for a list than a set.
Step28: Any idea why there's such a discrepancy?
Step29: In addition to accessing values by keys, you can retrieve the keys and values by themselves as lists
Step30: Note that if you're using Python 3.5 or earlier, the order that you insert key/value pairs into the dictionary doesn't correspond to the order they're stored in by default (we inserted favorite_restaurant after years_in_durham!). This default behavior was just recently changed in Python 3.6 (released in December 2016).
Step31: For loops
Step32: To iterate for a specific number of times, you can create an iterator object with the range function
Step33: While loops
Step34: There are occasionally other reasons for using while loops (waiting for an external input, for example), but we won't make extensive use of them in this course.
Step35: Some common mathematical functions that are built into Python
Step36: Python has dynamic typing, which (in part) means that the arguments to functions aren't assigned a specific type
Step37: Required arguments vs optional arguments
Step38: Modules
|
3,720
|
<ASSISTANT_TASK:>
Python Code:
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from matplotlib.colors import ListedColormap
def plot_decision_regions_3class(data_set,model=None):
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA','#00AAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00','#00AAFF'])
X=data_set.x.numpy()
y=data_set.y.numpy()
h = .02
x_min, x_max = X[:, 0].min()-0.1 , X[:, 0].max()+0.1
y_min, y_max = X[:, 1].min()-0.1 , X[:, 1].max() +0.1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h))
newdata=np.c_[xx.ravel(), yy.ravel()]
#XX=torch.torch.Tensor(newdata)
#_,yhat=torch.max(model(XX),1)
#yhat=yhat.numpy().reshape(xx.shape)
Z=data_set.fun(newdata).flatten()
f=np.zeros(Z.shape)
f[Z>0]=1
f=f.reshape(xx.shape)
if model!=None:
model.eval()
XX=torch.torch.Tensor(newdata)
_,yhat=torch.max(model(XX),1)
yhat=yhat.numpy().reshape(xx.shape)
plt.pcolormesh(xx, yy, yhat, cmap=cmap_light)
plt.contour(xx, yy, f, cmap=plt.cm.Paired)
else:
plt.contour(xx, yy, f, cmap=plt.cm.Paired)
plt.pcolormesh(xx, yy, f, cmap=cmap_light)
plt.title("decision region vs True decision boundary")
plt.legend()
def accuracy(model,data_set):
_,yhat=torch.max(model(data_set.x),1)
return (yhat==data_set.y).numpy().mean()
from torch.utils.data import Dataset, DataLoader
class Data(Dataset):
def __init__(self,N_SAMPLES = 1000,noise_std=0.1,train=True):
a=np.matrix([-1,1,2,1,1,-3,1]).T
self.x = np.matrix(np.random.rand(N_SAMPLES,2))
self.f=np.array(a[0]+(self.x)*a[1:3]+np.multiply(self.x[:,0], self.x[:,1])*a[4]+np.multiply(self.x, self.x)*a[5:7]).flatten()
self.a=a
self.y=np.zeros(N_SAMPLES)
self.y[self.f> 0]=1
self.y=torch.from_numpy(self.y).type(torch.LongTensor)
self.x=torch.from_numpy(self.x).type(torch.FloatTensor)
self.x = self.x+noise_std*torch.randn(self.x.size())
self.f=torch.from_numpy(self.f)
self.a=a
if train==True:
torch.manual_seed(1)
self.x = self.x+noise_std*torch.randn(self.x.size())
torch.manual_seed(0)
def __getitem__(self,index):
return self.x[index],self.y[index]
    def __len__(self):
        return self.x.shape[0]
def plot(self):
        X = self.x.numpy()
        y = self.y.numpy()
h = .02
x_min, x_max = X[:, 0].min() , X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h))
        Z = self.fun(np.c_[xx.ravel(), yy.ravel()]).flatten()
f=np.zeros(Z.shape)
f[Z>0]=1
f=f.reshape(xx.shape)
plt.title('True decision boundary and sample points with noise ')
plt.plot(self.x[self.y==0,0].numpy(),self.x[self.y==0,1].numpy(),'bo',label='y=0' )
plt.plot(self.x[self.y==1,0].numpy(), self.x[self.y==1,1].numpy(),'ro',label='y=1' )
plt.contour(xx, yy, f, cmap=plt.cm.Paired)
plt.xlim(0,1)
plt.ylim(0,1)
plt.legend()
def fun(self,x):
x=np.matrix(x)
out=np.array(self.a[0]+(x)*self.a[1:3]+np.multiply(x[:,0], x[:,1])*self.a[4]+np.multiply(x, x)*self.a[5:7])
out=np.array(out)
return out
data_set=Data(noise_std=0.2)
data_set.plot()
torch.manual_seed(0)
validation_set=Data(train=False)
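# The model definitions are missing from this cleaned-up listing; below is a
# plausible reconstruction (an assumption, not the original course code) of
# the two classifiers being compared -- identical nets, the second one with
# p=0.5 dropout between layers:
model = nn.Sequential(nn.Linear(2, 300), nn.ReLU(),
                      nn.Linear(300, 300), nn.ReLU(),
                      nn.Linear(300, 2))
model_drop = nn.Sequential(nn.Linear(2, 300), nn.Dropout(0.5), nn.ReLU(),
                           nn.Linear(300, 300), nn.Dropout(0.5), nn.ReLU(),
                           nn.Linear(300, 2))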
model_drop.train()
optimizer_ofit = torch.optim.Adam(model.parameters(), lr=0.01)
optimizer_drop = torch.optim.Adam(model_drop.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
LOSS={}
LOSS['training data no dropout']=[]
LOSS['validation data no dropout']=[]
LOSS['training data dropout']=[]
LOSS['validation data dropout']=[]
epochs=500
for epoch in range(epochs):
#make a prediction for both models
yhat = model(data_set.x)
yhat_drop = model_drop(data_set.x)
#calculate the lossf or both models
loss = criterion(yhat, data_set.y)
loss_drop = criterion(yhat_drop, data_set.y)
#store the loss for both the training and validation data for both models
LOSS['training data no dropout'].append(loss.item())
LOSS['validation data no dropout'].append(criterion(model(validation_set.x), validation_set.y).item())
LOSS['training data dropout'].append(loss_drop.item())
model_drop.eval()
LOSS['validation data dropout'].append(criterion(model_drop(validation_set.x), validation_set.y).item())
model_drop.train()
#clear gradient
optimizer_ofit.zero_grad()
optimizer_drop.zero_grad()
#Backward pass: compute gradient of the loss with respect to all the learnable parameters
loss.backward()
loss_drop.backward()
#the step function on an Optimizer makes an update to its parameters
optimizer_ofit.step()
optimizer_drop.step()
model_drop.eval()
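# Aside (an added comparison): validation accuracy via the helper defined above.
print("accuracy, no dropout:", accuracy(model, validation_set))
print("accuracy, dropout:   ", accuracy(model_drop, validation_set))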
plot_decision_regions_3class(data_set)
plot_decision_regions_3class(data_set,model)
plot_decision_regions_3class(data_set,model_drop)
plt.figure(figsize=(6.1, 10))
for key, value in LOSS.items():
plt.plot(np.log(np.array(value)),label=key)
plt.legend()
plt.xlabel("iterations")
plt.ylabel("Log of cost or total loss")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use this function only for plotting
Step2: Use this function to calculate accuracy
Step3: <a id="ref0"></a>
Step4: Create a dataset object
Step5: Get some validation data
Step6: <a id="ref1"></a>
Step7: Train the model by using the Adam optimizer. See the unit on other optimizers. Use the Cross Entropy Loss
Step8: Create the appropriate loss function.
Step9: Run 500 iterations of batch gradient descent
Step10: Set the model with dropout to evaluation mode
Step11: Test the accuracy of the model without dropout on the validation data.
Step12: model without dropout
Step13: model with dropout
Step14: You can see that the model using dropout does better at tracking the function that generated the data.
|
3,721
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
df = pd.read_json('../data/raw/train.json')
df['created'] = df['created'].apply(lambda row: pd.to_datetime(row))
def relative_count(df, column):
# Calculate counts per bedroom and interest_level
grouped = df.groupby([column, 'interest_level'])[column].count()
grouped_df = pd.DataFrame({column: grouped.index.get_level_values(0),
'interest_level': grouped.index.get_level_values(1),
'count': grouped.values})
# Get the total counts per bedroom
group_counts = df.groupby([column])[column].count()
# Calculate relative counts per group. This allows us to see more easily
# if there are differences
grouped_df['relative_count'] = grouped_df.apply(
lambda row: row['count'] / group_counts[row[column]], axis=1)
return grouped_df
# Combine all apartments with 5+ bedrooms into one category
df_bedrooms = df.copy(deep=True)
df_bedrooms['bedrooms'] = df_bedrooms['bedrooms'].apply(lambda b: str(b) if b <= 4 else '5+')
grouped_df = relative_count(df_bedrooms, 'bedrooms')
plt.figure(figsize=(8, 6))
sns.barplot(x='bedrooms', y='relative_count', hue='interest_level', data=grouped_df,
hue_order=['low', 'medium', 'high']);
import math
# Combine all apartments with 3+ bathrooms into one category and round half bathrooms down
df_bathrooms = df.copy(deep=True)
df_bathrooms['bathrooms'] = df_bathrooms['bathrooms'].apply(lambda x: int(math.floor(x)))
df_bathrooms['bathrooms'] = df_bathrooms['bathrooms'].apply(lambda b: str(b) if b <= 2 else '3+')
grouped_df = relative_count(df_bathrooms, 'bathrooms')
plt.figure(figsize=(8, 6))
sns.barplot(x='bathrooms', y='relative_count', hue='interest_level', data=grouped_df,
hue_order=['low', 'medium', 'high']);
sns.lmplot(x='bedrooms', y='bathrooms', x_jitter=0.5, y_jitter=0.5,
data=df, hue='interest_level', hue_order=['low', 'medium', 'high'], size=8);
import math
# Combine all apartments with 3+ bathrooms into one category and round half bathrooms down
df_photos = df.copy(deep=True)
df_photos['photo_count'] = df_photos['photos'].apply(len)
df_photos['photo_count'] = df_photos['photo_count'].apply(lambda x: int(math.floor(x)))
df_photos['photo_count'] = df_photos['photo_count'].apply(lambda b: str(b) if b <= 10 else '11+')
grouped_df = relative_count(df_photos, 'photo_count')
plt.figure(figsize=(8, 6))
sns.barplot(x='photo_count', y='relative_count', hue='interest_level', data=grouped_df,
order=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11+'],
hue_order=['low', 'medium', 'high']);
df_cheaper = df[df['price'] < 10000]
sns.violinplot(x='interest_level', y='price', data=df_cheaper);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bedrooms
Step2: We can see that the interest level rises slightly for apartments with more than 1 bedroom, but for 5 bedrooms and more it falls sharply.
Step3: We can see that the interest level is lowest for apartments with 0 bathrooms and also low for apartments with 3+ bathrooms.
Step4: This plot is not so simple to read, because I applied a lot of jitter. We can see that apartments with zero or one bedrooms almost all the time have 1 bathroom. For two and three bedrooms there are both apartments with one and with two bathrooms. For more bedrooms the data gets quite spare, but the number of bathrooms rises.
Step5: We can see that the interest level for 0 photos is really low (which was expected), and if there are pictures, the distribution is quite smooth, with the highest interest levels at about 4-8 photos.
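Below is a minimal sketch (toy data, not from the notebook) of the normalization idea that relative_count implements: counts per (group, interest_level) divided by the total count per group.
import pandas as pd
toy = pd.DataFrame({'bedrooms': [1, 1, 2, 2, 2],
                    'interest_level': ['low', 'high', 'low', 'low', 'high']})
counts = toy.groupby(['bedrooms', 'interest_level']).size()
totals = toy.groupby('bedrooms').size()
# broadcast the per-group totals across the inner index level
print(counts.div(totals, level='bedrooms'))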
|
3,722
|
<ASSISTANT_TASK:>
Python Code:
%run "../Functions/2. Google form analysis.ipynb"
# Localplayerguids of users who answered the questionnaire (see below).
# French
#localplayerguid = 'a4d4b030-9117-4331-ba48-90dc05a7e65a'
#localplayerguid = 'd6826fd9-a6fc-4046-b974-68e50576183f'
#localplayerguid = 'deb089c0-9be3-4b75-9b27-28963c77b10c'
#localplayerguid = '75e264d6-af94-4975-bb18-50cac09894c4'
#localplayerguid = '3d733347-0313-441a-b77c-3e4046042a53'
# English
localplayerguid = '8d352896-a3f1-471c-8439-0f426df901c1'
#localplayerguid = '7037c5b2-c286-498e-9784-9a061c778609'
#localplayerguid = '5c4939b5-425b-4d19-b5d2-0384a515539e'
#localplayerguid = '7825d421-d668-4481-898a-46b51efe40f0'
#localplayerguid = 'acb9c989-b4a6-4c4d-81cc-6b5783ec71d8'
#localplayerguid = devPCID5
len(getAllResponders())
userIdThatDidNotAnswer in gform['userId'].values, hasAnswered( userIdThatDidNotAnswer )
assert(not hasAnswered( userIdThatDidNotAnswer )), "User has NOT answered"
assert(hasAnswered( userId1AnswerEN )), "User HAS answered"
assert(hasAnswered( userIdAnswersEN )), "User HAS answered"
assert(hasAnswered( userId1AnswerFR )), "User HAS answered"
assert(hasAnswered( userIdAnswersFR )), "User HAS answered"
assert(hasAnswered( userIdAnswersENFR )), "User HAS answered"
assert (len(getAnswers( userIdThatDidNotAnswer ).columns) == 0),"Too many answers"
assert (len(getAnswers( userId1AnswerEN ).columns) == 1),"Too many answers"
assert (len(getAnswers( userIdAnswersEN ).columns) >= 2),"Not enough answers"
assert (len(getAnswers( userId1AnswerFR ).columns) == 1),"Not enough columns"
assert (len(getAnswers( userIdAnswersFR ).columns) >= 2),"Not enough answers"
assert (len(getAnswers( userIdAnswersENFR ).columns) >= 2),"Not enough answers"
assert (len(getCorrections( userIdThatDidNotAnswer ).columns) == 0),"Too many answers"
assert (len(getCorrections( userId1AnswerEN ).columns) == 2),"Too many answers"
assert (len(getCorrections( userIdAnswersEN ).columns) >= 4),"Not enough answers"
assert (len(getCorrections( userId1AnswerFR ).columns) == 2),"Too many answers"
assert (len(getCorrections( userIdAnswersFR ).columns) >= 4),"Not enough answers"
assert (len(getCorrections( userIdAnswersENFR ).columns) >= 4),"Not enough answers"
assert (len(pd.DataFrame(getScore( userIdThatDidNotAnswer ).values.flatten().tolist()).values.flatten().tolist()) == 0),"Too many answers"
score = getScore( userId1AnswerEN )
#print(score)
assert (
(len(score.values.flatten()) == 3)
and
score[answerTemporalities[0]][0][0] == 0
),"Incorrect score"
score = getScore( userIdAnswersEN )
#print(score)
assert (
(len(score.values.flatten()) == 3)
and
score[answerTemporalities[0]][0][0] == 5
and
score[answerTemporalities[1]][0][0] == 25
),"Incorrect score"
score = getScore( userId1AnswerFR )
#print(score)
assert (
(len(score.values.flatten()) == 3)
and
score[answerTemporalities[0]][0][0] == 23
),"Incorrect score"
score = getScore( userIdAnswersFR )
#print(score)
assert (
(len(score.values.flatten()) == 3)
and
score[answerTemporalities[0]][0][0] == 15
and
score[answerTemporalities[1]][0][0] == 26
),"Incorrect score"
score = getScore( userIdAnswersENFR )
#print(score)
assert (
(len(score.values.flatten()) == 3)
and
score[answerTemporalities[0]][0][0] == 4
and
score[answerTemporalities[1]][0][0] == 13
),"Incorrect score"
objective = 0
assert (len(getValidatedCheckpoints( userIdThatDidNotAnswer )) == objective),"Incorrect number of answers"
objective = 1
assert (len(getValidatedCheckpoints( userId1AnswerEN )) == objective),"Incorrect number of answers"
assert (getValidatedCheckpoints( userId1AnswerEN )[0].equals(validableCheckpoints)) \
, "User has validated everything"
objective = 2
assert (len(getValidatedCheckpoints( userIdAnswersEN )) == objective),"Incorrect number of answers"
objective = 3
assert (len(getValidatedCheckpoints( userIdAnswersEN )[0]) == objective) \
, "User has validated " + objective + " chapters on first try"
objective = 1
assert (len(getValidatedCheckpoints( userId1AnswerFR )) == objective),"Incorrect number of answers"
assert (getValidatedCheckpoints( userId1AnswerFR )[0].equals(validableCheckpoints)) \
, "User has validated everything"
objective = 2
assert (len(getValidatedCheckpoints( userIdAnswersFR )) == objective),"Incorrect number of answers"
objective = 5
assert (len(getValidatedCheckpoints( userIdAnswersFR )[1]) == objective) \
, "User has validated " + objective + " chapters on second try"
objective = 2
assert (len(getValidatedCheckpoints( userIdAnswersENFR )) == objective),"Incorrect number of answers"
objective = 5
assert (len(getValidatedCheckpoints( userIdAnswersENFR )[1]) == objective) \
, "User has validated " + objective + " chapters on second try"
getValidatedCheckpoints( userIdThatDidNotAnswer )
pd.Series(getValidatedCheckpoints( userIdThatDidNotAnswer ))
type(getNonValidated(pd.Series(getValidatedCheckpoints( userIdThatDidNotAnswer ))))
validableCheckpoints
assert(getNonValidated(getValidatedCheckpoints( userIdThatDidNotAnswer ))).equals(validableCheckpoints), \
"incorrect validated checkpoints: should contain all checkpoints that can be validated"
testSeries = pd.Series(
[
'', # 7
'', # 8
'', # 9
'', # 10
'tutorial1.Checkpoint00', # 11
'tutorial1.Checkpoint00', # 12
'tutorial1.Checkpoint00', # 13
'tutorial1.Checkpoint00', # 14
'tutorial1.Checkpoint02', # 15
'tutorial1.Checkpoint01', # 16
'tutorial1.Checkpoint05'
]
)
assert(getNonValidated(pd.Series([testSeries]))[0][0] == 'tutorial1.Checkpoint13'), "Incorrect non validated checkpoint"
getNonValidatedCheckpoints( userIdThatDidNotAnswer )
getNonValidatedCheckpoints( userId1AnswerEN )
getNonValidatedCheckpoints( userIdAnswersEN )
getNonValidatedCheckpoints( userId1AnswerFR )
getNonValidatedCheckpoints( userIdAnswersFR )
getNonValidatedCheckpoints( userIdAnswersENFR )
getValidatedCheckpointsCounts(userIdThatDidNotAnswer)
getValidatedCheckpointsCounts(userId1AnswerEN)
getValidatedCheckpointsCounts(userIdAnswersEN)
getValidatedCheckpointsCounts(userId1ScoreEN)
getValidatedCheckpointsCounts(userIdScoresEN)
getValidatedCheckpointsCounts(userId1AnswerFR)
getValidatedCheckpointsCounts(userIdAnswersFR)
getValidatedCheckpointsCounts(userId1ScoreFR)
getValidatedCheckpointsCounts(userIdScoresFR)
getValidatedCheckpointsCounts(userIdAnswersENFR)
getNonValidatedCheckpointsCounts(userIdThatDidNotAnswer)
getNonValidatedCheckpointsCounts(userId1AnswerEN)
getNonValidatedCheckpointsCounts(userIdAnswersEN)
getNonValidatedCheckpointsCounts(userId1ScoreEN)
getNonValidatedCheckpointsCounts(userIdScoresEN)
getNonValidatedCheckpointsCounts(userId1AnswerFR)
getNonValidatedCheckpointsCounts(userIdAnswersFR)
getNonValidatedCheckpointsCounts(userId1ScoreFR)
getNonValidatedCheckpointsCounts(userIdScoresFR)
getNonValidatedCheckpointsCounts(userIdAnswersENFR)
aYes = ["Yes", "Oui"]
aNo = ["No", "Non"]
aNoIDK = ["No", "Non", "I don't know", "Je ne sais pas"]
# How long have you studied biology?
qBiologyEducationLevelIndex = 5
aBiologyEducationLevelHigh = ["Until bachelor's degree", "Jusqu'à la license"]
aBiologyEducationLevelLow = ['Until the end of high school', 'Until the end of middle school', 'Not even in middle school',
"Jusqu'au bac", "Jusqu'au brevet", 'Jamais']
# Have you ever heard about BioBricks?
qHeardBioBricksIndex = 8
# Have you played the current version of Hero.Coli?
qPlayedHerocoliIndex = 10
qPlayedHerocoliYes = ['Yes', 'Once', 'Multiple times', 'Oui',
'De nombreuses fois', 'Quelques fois', 'Une fois']
qPlayedHerocoliNo = ['No', 'Non',]
gform[QStudiedBiology].unique()
gform['Before playing Hero.Coli, had you ever heard about BioBricks?'].unique()
gform['Have you played the current version of Hero.Coli?'].unique()
getAllAnswerRows(qBiologyEducationLevelIndex, aBiologyEducationLevelHigh)
assert(len(getAllAnswerRows(qBiologyEducationLevelIndex, aBiologyEducationLevelHigh)) != 0)
assert(len(getAllAnswerRows(qBiologyEducationLevelIndex, aBiologyEducationLevelLow)) != 0)
assert(len(getAllAnswerRows(qHeardBioBricksIndex, aYes)) != 0)
assert(len(getAllAnswerRows(qHeardBioBricksIndex, aNoIDK)) != 0)
assert(len(getAllAnswerRows(qPlayedHerocoliIndex, qPlayedHerocoliYes)) != 0)
assert(len(getAllAnswerRows(qPlayedHerocoliIndex, qPlayedHerocoliNo)) != 0)
questionIndex = 15
gform.iloc[:, questionIndex].head()
(qBiologyEducationLevelIndex, aBiologyEducationLevelHigh)
getAllAnswerRows(qBiologyEducationLevelIndex, aBiologyEducationLevelHigh)
getPercentCorrectKnowingAnswer(qBiologyEducationLevelIndex, aBiologyEducationLevelHigh)
getPercentCorrectKnowingAnswer(qBiologyEducationLevelIndex, aBiologyEducationLevelLow)
getPercentCorrectKnowingAnswer(qHeardBioBricksIndex, aYes)
getPercentCorrectKnowingAnswer(qHeardBioBricksIndex, aNoIDK)
playedHerocoliIndexYes = getPercentCorrectKnowingAnswer(qPlayedHerocoliIndex, qPlayedHerocoliYes)
playedHerocoliIndexYes
playedHerocoliIndexNo = getPercentCorrectKnowingAnswer(qPlayedHerocoliIndex, qPlayedHerocoliNo)
playedHerocoliIndexNo
playedHerocoliIndexYes - playedHerocoliIndexNo
(playedHerocoliIndexYes - playedHerocoliIndexNo) / (1 - playedHerocoliIndexNo)
#gform = gformEN
transposed = gform.T
#answers = transposed[transposed[]]
transposed
type(gform)
gform.columns
gform.columns.get_loc('Do not edit - pre-filled anonymous ID')
localplayerguidkey
# Using the whole question:
gform[localplayerguidkey]
# Get index from question
localplayerguidindex
# Using the index of the question:
gform.iloc[:, localplayerguidindex]
sample = gform
#def getUniqueUserCount(sample):
sample[localplayerguidkey].nunique()
userIds = gform[localplayerguidkey].unique()
len(userIds)
allResponders = getAllResponders()
uniqueUsers = np.unique(allResponders)
print(len(allResponders))
print(len(uniqueUsers))
for guid in uniqueUsers:
if(not isGUIDFormat(guid)):
print('incorrect guid: ' + str(guid))
uniqueUsers = getAllResponders()
userCount = len(uniqueUsers)
guid = '0'
while (not isGUIDFormat(guid)):
userIndex = randint(0,userCount-1)
guid = uniqueUsers[userIndex]
guid
#userId = userIdThatDidNotAnswer
#userId = userId1AnswerEN
userId = userIdAnswersEN
_form = gform
#def getAnswers( userId, _form = gform ):
answers = _form[_form[localplayerguidkey]==userId]
_columnAnswers = answers.T
if 0 != len(answers):
_newColumns = []
for column in _columnAnswers.columns:
_newColumns.append(answersColumnNameStem + str(column))
_columnAnswers.columns = _newColumns
else:
# user has never answered
print("user " + str(userId) + " has never answered")
_columnAnswers
answers
# Selection of a specific answer
answers.iloc[:,localplayerguidindex]
answers.iloc[:,localplayerguidindex].iloc[0]
type(answers.iloc[0,:])
answers.iloc[0,:].values
#### Question that has a correct answer:
questionIndex = 15
answers.iloc[:,questionIndex].iloc[0]
correctAnswers.iloc[questionIndex][0]
answers.iloc[:,questionIndex].iloc[0].startswith(correctAnswers.iloc[questionIndex][0])
#### Question that has no correct answer:
questionIndex = 0
#answers.iloc[:,questionIndex].iloc[0].startswith(correctAnswers.iloc[questionIndex].iloc[0])
#### Batch check:
columnAnswers = getAnswers( userId )
columnAnswers.values[2,0]
columnAnswers[columnAnswers.columns[0]][2]
correctAnswers
type(columnAnswers)
indexOfFirstEvaluationQuestion = 13
columnAnswers.index[indexOfFirstEvaluationQuestion]
gform.tail(50)
gform[gform[localplayerguidkey] == 'ba202bbc-af77-42e8-85ff-e25b871717d5']
gformRealBefore = gform.loc[88, QTimestamp]
gformRealBefore
gformRealAfter = gform.loc[107, QTimestamp]
gformRealAfter
RMRealFirstEvent = getFirstEventDate(gform.loc[88,localplayerguidkey])
RMRealFirstEvent
tzAnswerDate = gformRealBefore
gameEventDate = RMRealFirstEvent
#def getTemporality( answerDate, gameEventDate ):
result = answerTemporalities[2]
if(gameEventDate != pd.Timestamp.max.tz_localize('utc')):
if(answerDate <= gameEventDate):
result = answerTemporalities[0]
elif (answerDate > gameEventDate):
result = answerTemporalities[1]
result, tzAnswerDate, gameEventDate
firstEventDate = getFirstEventDate(gform.loc[userIndex,localplayerguidkey])
firstEventDate
gformTestBefore = pd.Timestamp('2018-01-16 14:28:20.998000+0000', tz='UTC')
getTemporality(gformTestBefore,firstEventDate)
gformTestWhile = pd.Timestamp('2018-01-16 14:28:23.998000+0000', tz='UTC')
getTemporality(gformTestWhile,firstEventDate)
gformTestAfter = pd.Timestamp('2018-01-16 14:28:24.998000+0000', tz='UTC')
getTemporality(gformTestAfter,firstEventDate)
_form = gform
_rmDF = rmdf1522
_rmTestDF = normalizedRMDFTest
includeAndroid = True
#def getTestAnswers( _form = gform, _rmDF = rmdf1522, _rmTestDF = normalizedRMDFTest, includeAndroid = True):
_form[_form[localplayerguidkey].isin(testUsers)]
_form[localplayerguidkey]
testUsers
len(getTestAnswers()[localplayerguidkey])
rmdf1522['customData.platform'].unique()
rmdf1522[rmdf1522['customData.platform'].apply(lambda s: str(s).endswith('editor'))]
rmdf1522[rmdf1522['userId'].isin(getTestAnswers()[localplayerguidkey])][['userTime','customData.platform','userId']].dropna()
columnAnswers
#testUserId = userId1AnswerEN
testUserId = '8d352896-a3f1-471c-8439-0f426df901c1'
getCorrections(testUserId)
testUserId = '8d352896-a3f1-471c-8439-0f426df901c1'
source = correctAnswers
#def getCorrections( _userId, _source = correctAnswers, _form = gform ):
columnAnswers = getAnswers( testUserId )
if 0 != len(columnAnswers.columns):
questionsCount = len(columnAnswers.values)
for columnName in columnAnswers.columns:
if answersColumnNameStem in columnName:
answerNumber = columnName.replace(answersColumnNameStem,"")
newCorrectionsColumnName = correctionsColumnNameStem + answerNumber
columnAnswers[newCorrectionsColumnName] = columnAnswers[columnName]
columnAnswers[newCorrectionsColumnName] = pd.Series(np.full(questionsCount, np.nan))
for question in columnAnswers[columnName].index:
#print()
#print(question)
__correctAnswers = source.loc[question]
if(len(__correctAnswers) > 0):
columnAnswers.loc[question,newCorrectionsColumnName] = False
for correctAnswer in __correctAnswers:
#print("-> " + correctAnswer)
if str(columnAnswers.loc[question,columnName])\
.startswith(str(correctAnswer)):
columnAnswers.loc[question,newCorrectionsColumnName] = True
break
else:
# user has never answered
print("can't give correct answers")
columnAnswers
question = QAge
columnName = ''
for column in columnAnswers.columns:
if str.startswith(column, 'answers'):
columnName = column
break
type(columnAnswers.loc[question,columnName])
getCorrections(localplayerguid)
gform.columns[20]
columnAnswers.loc[gform.columns[20],columnAnswers.columns[1]]
columnAnswers[columnAnswers.columns[1]][gform.columns[13]]
columnAnswers.loc[gform.columns[13],columnAnswers.columns[1]]
columnAnswers.iloc[20,1]
questionsCount
np.full(3, np.nan)
pd.Series(np.full(questionsCount, np.nan))
columnAnswers.loc[question,newCorrectionsColumnName]
question
correctAnswers[question]
getCorrections('8d352896-a3f1-471c-8439-0f426df901c1')
correctAnswersEN
#demographicAnswersEN
type([])
mergedCorrectAnswersEN = correctAnswersEN.copy()
for index in mergedCorrectAnswersEN.index:
#print(str(mergedCorrectAnswersEN.loc[index,column]))
mergedCorrectAnswersEN.loc[index] =\
demographicAnswersEN.loc[index] + mergedCorrectAnswersEN.loc[index]
mergedCorrectAnswersEN
correctAnswersEN + demographicAnswersEN
correctAnswers + demographicAnswers
corrections = getCorrections(userIdAnswersENFR)
#corrections
for columnName in corrections.columns:
if correctionsColumnNameStem in columnName:
for index in corrections[columnName].index:
if(True==corrections.loc[index,columnName]):
corrections.loc[index,columnName] = 1
elif (False==corrections.loc[index,columnName]):
corrections.loc[index,columnName] = 0
corrections
binarized = getBinarizedCorrections(corrections)
binarized
slicedBinarized = binarized[13:40]
slicedBinarized
slicedBinarized =\
binarized[13:40][binarized.columns[\
binarized.columns.to_series().str.contains(correctionsColumnNameStem)\
]]
slicedBinarized
_source = correctAnswers
_userId = getRandomGFormGUID()
getCorrections(_userId, _source=_source, _form = gform)
_userId = '5e978fb3-316a-42ba-bb58-00856353838d'
gform[gform[localplayerguidkey] == _userId].iloc[0].index
_gformLine = gform[gform[localplayerguidkey] == _userId].iloc[0]
_gformLine.loc['Before playing Hero.Coli, had you ever heard about synthetic biology?']
_gformLine = gform[gform[localplayerguidkey] == _userId].iloc[0]
# only for one user
# def getBinarized(_gformLine, _source = correctAnswers):
_notEmptyIndexes = []
for _index in _source.index:
if(len(_source.loc[_index]) > 0):
_notEmptyIndexes.append(_index)
_binarized = pd.Series(np.full(len(_gformLine.index), np.nan), index = _gformLine.index)
for question in _gformLine.index:
_correctAnswers = _source.loc[question]
if(len(_correctAnswers) > 0):
_binarized[question] = 0
for _correctAnswer in _correctAnswers:
if str(_gformLine.loc[question])\
.startswith(str(_correctAnswer)):
_binarized.loc[question] = 1
break
_slicedBinarized = _binarized.loc[_notEmptyIndexes]
_slicedBinarized
_slicedBinarized.loc['What are BioBricks and devices?']
allBinarized = getAllBinarized()
plotCorrelationMatrix(allBinarized)
source
source = correctAnswers + demographicAnswers
notEmptyIndexes = []
for eltIndex in source.index:
#print(eltIndex)
if(len(source.loc[eltIndex]) > 0):
notEmptyIndexes.append(eltIndex)
len(source)-len(notEmptyIndexes)
emptyForm = gform[gform[localplayerguidkey] == 'incorrectGUID']
emptyForm
_source = correctAnswers + demographicAnswers
_form = gform #emptyForm
#def getAllBinarized(_source = correctAnswers, _form = gform ):
_notEmptyIndexes = []
for _index in _source.index:
if(len(_source.loc[_index]) > 0):
_notEmptyIndexes.append(_index)
_result = pd.DataFrame(index = _notEmptyIndexes)
for _userId in getAllResponders( _form = _form ):
_corrections = getCorrections(_userId, _source=_source, _form = _form)
_binarized = getBinarizedCorrections(_corrections)
_slicedBinarized =\
_binarized.loc[_notEmptyIndexes][_binarized.columns[\
_binarized.columns.to_series().str.contains(correctionsColumnNameStem)\
]]
_result = pd.concat([_result, _slicedBinarized], axis=1)
_result = _result.T
#_result
if(_result.shape[0] > 0 and _result.shape[1] > 0):
correlation = _result.astype(float).corr()
#plt.matshow(correlation)
sns.clustermap(correlation,cmap=plt.cm.jet,square=True,figsize=(10,10))
#ax = sns.clustermap(correlation,cmap=plt.cm.jet,square=True,figsize=(10,10),cbar_kws={\
#"orientation":"vertical"})
correlation_pearson = _result.T.astype(float).corr(methods[0])
correlation_kendall = _result.T.astype(float).corr(methods[1])
correlation_spearman = _result.T.astype(float).corr(methods[2])
print(correlation_pearson.equals(correlation_kendall))
print(correlation_kendall.equals(correlation_spearman))
diff = (correlation_pearson - correlation_kendall)
flattened = diff[diff > 0.1].values.flatten()
flattened[~np.isnan(flattened)]
correlation
scientificQuestionsLabels = gform.columns[13:40]
scientificQuestionsLabels = [
'In order to modify the abilities of the bacterium, you have to... #1',
'What are BioBricks and devices? #2',
'What is the name of this BioBrick? #3',
'What is the name of this BioBrick?.1 #4',
'What is the name of this BioBrick?.2 #5',
'What is the name of this BioBrick?.3 #6',
'What does this BioBrick do? #7',
'What does this BioBrick do?.1 #8',
'What does this BioBrick do?.2 #9',
'What does this BioBrick do?.3 #10',
'Pick the case where the BioBricks are well-ordered: #11',
'When does green fluorescence happen? #12',
'What happens when you unequip the movement device? #13',
'What is this? #14',
'What does this device do? #15',
'What does this device do?.1 #16',
'What does this device do?.2 #17',
'What does this device do?.3 #18',
'What does this device do?.4 #19',
'What does this device do?.5 #20',
'What does this device do?.6 #21',
'What does this device do?.7 #22',
'Guess: what would a device producing l-arabinose do, if it started with a l-arabinose-induced promoter? #23',
'Guess: the bacterium would glow yellow... #24',
'What is the species of the bacterium of the game? #25',
'What is the scientific name of the tails of the bacterium? #26',
'Find the antibiotic: #27',
]
scientificQuestionsLabelsX = [
'#1 In order to modify the abilities of the bacterium, you have to...',
'#2 What are BioBricks and devices?',
'#3 What is the name of this BioBrick?',
'#4 What is the name of this BioBrick?.1',
'#5 What is the name of this BioBrick?.2',
'#6 What is the name of this BioBrick?.3',
'#7 What does this BioBrick do?',
'#8 What does this BioBrick do?.1',
'#9 What does this BioBrick do?.2',
'#10 What does this BioBrick do?.3',
'#11 Pick the case where the BioBricks are well-ordered:',
'#12 When does green fluorescence happen?',
'#13 What happens when you unequip the movement device?',
'#14 What is this?',
'#15 What does this device do?',
'#16 What does this device do?.1',
'#17 What does this device do?.2',
'#18 What does this device do?.3',
'#19 What does this device do?.4',
'#20 What does this device do?.5',
'#21 What does this device do?.6',
'#22 What does this device do?.7',
'#23 Guess: what would a device producing l-arabinose do, if it started with a l-arabinose-induced promoter?',
'#24 Guess: the bacterium would glow yellow...',
'#25 What is the species of the bacterium of the game?',
'#26 What is the scientific name of the tails of the bacterium?',
'#27 Find the antibiotic:',
]
questionsLabels = scientificQuestionsLabels
questionsLabelsX = scientificQuestionsLabelsX
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
ax.set_yticklabels(['']+questionsLabels)
ax.set_xticklabels(['']+questionsLabelsX, rotation='vertical')
ax.matshow(correlation)
ax.set_xticks(np.arange(-1,len(questionsLabels),1.));
ax.set_yticks(np.arange(-1,len(questionsLabels),1.));
questionsLabels = correlation.columns.copy()
newLabels = []
for index in range(0, len(questionsLabels)):
newLabels.append(questionsLabels[index] + ' #' + str(index + 1))
correlationRenamed = correlation.copy()
correlationRenamed.columns = newLabels
correlationRenamed.index = newLabels
correlationRenamed
correlationRenamed = correlation.copy()
correlationRenamed.columns = pd.Series(correlation.columns).apply(lambda x: x + ' #' + str(correlation.columns.get_loc(x) + 1))
correlationRenamed.index = correlationRenamed.columns
correlationRenamed
correlation.shape
fig = plt.figure(figsize=(10,10))
ax12 = plt.subplot(111)
ax12.set_title('Heatmap')
sns.heatmap(correlation,ax=ax12,cmap=plt.cm.jet,square=True)
ax = sns.clustermap(correlation,cmap=plt.cm.jet,square=True,figsize=(10,10),cbar_kws={\
"orientation":"vertical"})
questionsLabels = pd.Series(correlation.columns).apply(lambda x: x + ' #' + str(correlation.columns.get_loc(x) + 1))
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(111)
cmap=plt.cm.jet
#cmap=plt.cm.ocean
cax = ax.imshow(correlation, interpolation='nearest', cmap=cmap,
# extent=(0.5,np.shape(correlation)[0]+0.5,0.5,np.shape(correlation)[1]+0.5)
)
#ax.grid(True)
plt.title('Questions\' Correlations')
ax.set_yticklabels(questionsLabels)
ax.set_xticklabels(questionsLabels, rotation='vertical')
ax.set_xticks(np.arange(len(questionsLabels)));
ax.set_yticks(np.arange(len(questionsLabels)));
#ax.set_xticks(np.arange(-1,len(questionsLabels),1.));
#ax.set_yticks(np.arange(-1,len(questionsLabels),1.));
fig.colorbar(cax)
plt.show()
ax.get_xticks()
transposed = _result.T.astype(float)
transposed.head()
transposed.corr()
transposed.columns = range(0,len(transposed.columns))
transposed.index = range(0,len(transposed.index))
transposed.head()
transposed = transposed.iloc[0:10,0:3]
transposed
transposed = transposed.astype(float)
type(transposed[0][0])
transposed.columns = list('ABC')
transposed
transposed.loc[0, 'A'] = 0
transposed
transposed.corr()
round(7.64684)
df = pd.DataFrame(10*np.random.randint(2, size=[20,2]),index=range(0,20),columns=list('AB'))
#df.columns = range(0,len(df.columns))
df.head()
#type(df[0][0])
type(df.columns)
df.corr()
#corr = pd.Series({}, index = methods)
for meth in methods:
#corr[meth] = result.corr(method = meth)
print(meth + ":\n" + str(transposed.corr(method = meth)) + "\n\n")
befores = gform.copy()
befores = befores[befores[QTemporality] == answerTemporalities[0]]
print(len(befores))
allBeforesBinarized = getAllBinarized( _source = correctAnswers + demographicAnswers, _form = befores)
np.unique(allBeforesBinarized.values.flatten())
allBeforesBinarized.columns[20]
allBeforesBinarized.T.dot(allBeforesBinarized)
np.unique(allBeforesBinarized.iloc[:,20].values)
plotCorrelationMatrix( allBeforesBinarized, _abs=False,\
_clustered=False, _questionNumbers=True )
_correlation = allBeforesBinarized.astype(float).corr()
overlay = allBeforesBinarized.T.dot(allBeforesBinarized).astype(int)
_correlation.columns = pd.Series(_correlation.columns).apply(\
lambda x: x + ' #' + str(_correlation.columns.get_loc(x) + 1))
_correlation.index = _correlation.columns
_correlation = _correlation.abs()
_fig = plt.figure(figsize=(20,20))
_ax = plt.subplot(111)
#sns.heatmap(_correlation,ax=_ax,cmap=plt.cm.jet,square=True,annot=overlay,fmt='d')
sns.heatmap(_correlation,ax=_ax,cmap=plt.cm.jet,square=True,annot=True)
afters = gform.copy()
afters = afters[afters[QTemporality] == answerTemporalities[1]]
print(len(afters))
allAftersBinarized = getAllBinarized( _source = correctAnswers + demographicAnswers, _form = afters)
np.unique(allAftersBinarized.values.flatten())
plotCorrelationMatrix( allAftersBinarized, _abs=False,\
_clustered=False, _questionNumbers=True )
#for answerIndex in range(0,len(allAftersBinarized)):
# print(str(answerIndex) + " " + str(allAftersBinarized.iloc[answerIndex,0]))
allAftersBinarized.iloc[28,0]
len(allAftersBinarized)
len(allAftersBinarized.index)
_correlation = allAftersBinarized.astype(float).corr()
overlay = allAftersBinarized.T.dot(allAftersBinarized).astype(int)
_correlation.columns = pd.Series(_correlation.columns).apply(\
lambda x: x + ' #' + str(_correlation.columns.get_loc(x) + 1))
_correlation.index = _correlation.columns
_fig = plt.figure(figsize=(10,10))
_ax = plt.subplot(111)
#sns.heatmap(_correlation,ax=_ax,cmap=plt.cm.jet,square=True,annot=overlay,fmt='d')
sns.heatmap(_correlation,ax=_ax,cmap=plt.cm.jet,square=True)
crossCorrect = getCrossCorrectAnswers(allAftersBinarized)
pd.Series((overlay == crossCorrect).values.flatten()).unique()
allAftersBinarized.shape
cross = allAftersBinarized.T.dot(allAftersBinarized)
cross.shape
equal = (cross == crossCorrect)
type(equal)
pd.Series(equal.values.flatten()).unique()
testUser = userIdAnswersFR
gform[gform[localplayerguidkey] == testUser].T
getScore(testUser)
print("draft test")
testUserId = "3ef14300-4987-4b54-a56c-5b6d1f8a24a1"
testUserId = userIdAnswersEN
#def getScore( _userId, _form = gform ):
score = pd.DataFrame({}, columns = answerTemporalities)
score.loc['score',:] = np.nan
for column in score.columns:
score.loc['score', column] = []
if hasAnswered( testUserId ):
columnAnswers = getCorrections(testUserId)
for columnName in columnAnswers.columns:
# only work on corrected columns
if correctionsColumnNameStem in columnName:
answerColumnName = columnName.replace(correctionsColumnNameStem,\
answersColumnNameStem)
temporality = columnAnswers.loc[QTemporality,answerColumnName]
counts = (columnAnswers[columnName]).value_counts()
thisScore = 0
if(True in counts):
thisScore = counts[True]
score.loc['score',temporality].append(thisScore)
else:
print("user " + str(testUserId) + " has never answered")
#expectedScore = 18
#if (expectedScore != score[0]):
# print("ERROR incorrect score: expected "+ str(expectedScore) +", got "+ str(score))
score
score = pd.DataFrame({}, columns = answerTemporalities)
score.loc['score',:] = np.nan
for column in score.columns:
score.loc['score', column] = []
score
#score.loc['user0',:] = [1,2,3]
#score
#type(score)
#type(score[0])
#for i,v in score[0].iteritems():
# print(v)
#score[0][answerTemporalities[2]]
#columnAnswers.loc[QTemporality,'answers0']
False in (columnAnswers[columnName]).value_counts()
getScore("3ef14300-4987-4b54-a56c-5b6d1f8a24a1")
#gform[gform[localplayerguidkey]=="3ef14300-4987-4b54-a56c-5b6d1f8a24a1"].T
correctAnswers
#questionnaireValidatedCheckpointsPerQuestion = pd.Series(np.nan, index=range(35))
questionnaireValidatedCheckpointsPerQuestion = pd.Series(np.nan, index=range(len(checkpointQuestionMatching)))
questionnaireValidatedCheckpointsPerQuestion.head()
checkpointQuestionMatching['checkpoint'][19]
userId = localplayerguid
_form = gform
#function that returns the list of checkpoints from user id
#def getValidatedCheckpoints( userId, _form = gform ):
_validatedCheckpoints = []
if hasAnswered( userId, _form = _form ):
_columnAnswers = getCorrections( userId, _form = _form)
for _columnName in _columnAnswers.columns:
# only work on corrected columns
if correctionsColumnNameStem in _columnName:
_questionnaireValidatedCheckpointsPerQuestion = pd.Series(np.nan, index=range(len(checkpointQuestionMatching)))
for _index in range(0, len(_questionnaireValidatedCheckpointsPerQuestion)):
if _columnAnswers[_columnName][_index]==True:
_questionnaireValidatedCheckpointsPerQuestion[_index] = checkpointQuestionMatching['checkpoint'][_index]
else:
_questionnaireValidatedCheckpointsPerQuestion[_index] = ''
_questionnaireValidatedCheckpoints = _questionnaireValidatedCheckpointsPerQuestion.unique()
_questionnaireValidatedCheckpoints = _questionnaireValidatedCheckpoints[_questionnaireValidatedCheckpoints!='']
_questionnaireValidatedCheckpoints = pd.Series(_questionnaireValidatedCheckpoints)
_questionnaireValidatedCheckpoints = _questionnaireValidatedCheckpoints.sort_values()
_questionnaireValidatedCheckpoints.index = range(0, len(_questionnaireValidatedCheckpoints))
_validatedCheckpoints.append(_questionnaireValidatedCheckpoints)
else:
print("user " + str(userId) + " has never answered")
result = pd.Series(data=_validatedCheckpoints)
result
type(result[0])
testSeries1 = pd.Series(
[
'tutorial1.Checkpoint00',
'tutorial1.Checkpoint01',
'tutorial1.Checkpoint02',
'tutorial1.Checkpoint05'
]
)
testSeries2 = pd.Series(
[
'tutorial1.Checkpoint01',
'tutorial1.Checkpoint05'
]
)
np.setdiff1d(testSeries1, testSeries2)
np.setdiff1d(testSeries1.values, testSeries2.values)
getAnswers(localplayerguid).head(2)
getCorrections(localplayerguid).head(2)
getScore(localplayerguid)
getValidatedCheckpoints(localplayerguid)
getNonValidatedCheckpoints(localplayerguid)
qPlayedHerocoliIndex = 10
qPlayedHerocoliYes = ['Yes', 'Once', 'Multiple times', 'Oui',
'De nombreuses fois', 'Quelques fois', 'Une fois']
questionIndex = qPlayedHerocoliIndex
choice = qPlayedHerocoliYes
_form = gform
# returns all rows of Google form's answers that contain an element
# of the array 'choice' for question number 'questionIndex'
#def getAllAnswerRows(questionIndex, choice, _form = gform ):
_form[_form.iloc[:, questionIndex].isin(choice)]
_df = getAllAnswerRows(qPlayedHerocoliIndex, qPlayedHerocoliYes, _form = gform )
#def getPercentCorrectPerColumn(_df):
_count = len(_df)
_percents = pd.Series(np.full(len(_df.columns), np.nan), index=_df.columns)
for _rowIndex in _df.index:
for _columnName in _df.columns:
_columnIndex = _df.columns.get_loc(_columnName)
if ((_columnIndex >= firstEvaluationQuestionIndex) \
and (_columnIndex < len(_df.columns)-3)):
if(str(_df[_columnName][_rowIndex]).startswith(str(correctAnswers[_columnIndex]))):
if (np.isnan(_percents[_columnName])):
_percents[_columnName] = 1;
else:
_percents[_columnName] = _percents[_columnName]+1
else:
if (np.isnan(_percents[_columnName])):
_percents[_columnName] = 0;
_percents = _percents/_count
_percents['Count'] = _count
_percents
print('\n\n\npercents=\n' + str(_percents))
questionIndex = qPlayedHerocoliIndex
choice = qPlayedHerocoliYes
_form = gform
#def getPercentCorrectKnowingAnswer(questionIndex, choice, _form = gform):
_answerRows = getAllAnswerRows(questionIndex, choice, _form = _form);
getPercentCorrectPerColumn(_answerRows)
#localplayerguid = '8d352896-a3f1-471c-8439-0f426df901c1'
#localplayerguid = '7037c5b2-c286-498e-9784-9a061c778609'
#localplayerguid = '5c4939b5-425b-4d19-b5d2-0384a515539e'
#localplayerguid = '7825d421-d668-4481-898a-46b51efe40f0'
#localplayerguid = 'acb9c989-b4a6-4c4d-81cc-6b5783ec71d8'
for id in getAllResponders():
print("===========================================")
print("id=" + str(id))
print("-------------------------------------------")
print(getAnswers(id).head(2))
print("-------------------------------------------")
print(getCorrections(id).head(2))
print("-------------------------------------------")
print("scores=" + str(getScore(id)))
print("#ValidatedCheckpoints=" + str(getValidatedCheckpointsCounts(id)))
print("#NonValidatedCheckpoints=" + str(getNonValidatedCheckpointsCounts(id)))
print("===========================================")
gform[localplayerguidkey]
hasAnswered( '8d352896-a3f1-471c-8439-0f426df901c1' )
'8d352896-a3f1-471c-8439-0f426df901c1' in gform[localplayerguidkey].values
apostropheTestString = 'it\'s a test'
apostropheTestString
#gformEN.head(2)
#gformFR.head(2)
#gformEN[QLanguage] = pd.Series(enLanguageID, index=gformEN.index)
#gformFR[QLanguage] = pd.Series(frLanguageID, index=gformFR.index)
#gformFR.head(2)
# rename columns
#gformFR.columns = gformEN.columns
#gformFR.head(2)
#gformTestMerge = pd.concat([gformEN, gformFR])
#gformTestMerge.head(2)
#gformTestMerge.tail(2)
gform
localplayerguid
someAnswers = getAnswers( '8ca16c7a-70a6-4723-bd72-65b8485a2e86' )
someAnswers
testQuestionIndex = 24
thisUsersFirstEvaluationQuestion = str(someAnswers[someAnswers.columns[0]][testQuestionIndex])
thisUsersFirstEvaluationQuestion
someAnswers[someAnswers.columns[0]][QLanguage]
firstEvaluationQuestionCorrectAnswer = str(correctAnswers[testQuestionIndex])
firstEvaluationQuestionCorrectAnswer
thisUsersFirstEvaluationQuestion.startswith(firstEvaluationQuestionCorrectAnswer)
answerDate = gform[gform['userId'] == '51f1ef77-ec48-4976-be1f-89b7cbd1afab'][QTimestamp][0]
answerDate
allEvents = rmdf1522[rmdf1522['userId']=='51f1ef77-ec48-4976-be1f-89b7cbd1afab']
allEventsCount = len(allEvents)
eventsBeforeRatio = len(allEvents[allEvents['userTime'] < answerDate])/allEventsCount
eventsAfterRatio = len(allEvents[allEvents['userTime'] > answerDate])/allEventsCount
result = [eventsBeforeRatio, eventsAfterRatio]
result
len(gform)
len(gform[gform[QTemporality] == answerTemporalities[2]])
len(gform[gform[QTemporality] == answerTemporalities[0]])
len(gform[gform[QTemporality] == answerTemporalities[1]])
gform.loc[:, [QPlayed, 'userId', QTemporality, QTimestamp]].sort_values(by = ['userId', QTimestamp])
gform.loc[:, [QPlayed, 'userId', QTemporality, QTimestamp]].sort_values(by = ['userId', QTimestamp])
sortedGFs = gform.loc[:, [QPlayed, 'userId', QTemporality, QTimestamp]].sort_values(by = ['userId', QTimestamp])
sortedGFs[sortedGFs[QTemporality] == answerTemporalities[2]]
result = pd.DataFrame()
maxuserIdIndex = len(sortedGFs['userId'])
userIdIndex = 0
userIdIntProgress = IntProgress(
value=0,
min=0,
max=maxuserIdIndex,
description='userIdIndex:'
)
display(userIdIntProgress)
userIdText = Text('')
display(userIdText)
for userid in sortedGFs['userId']:
userIdIndex += 1
userIdIntProgress.value = userIdIndex
userIdText.value = userid
if (len(sortedGFs[sortedGFs['userId'] == userid]) >= 2) and (answerTemporalities[2] in sortedGFs[sortedGFs['userId'] == userid][QTemporality].values):
if len(result) == 0:
result = sortedGFs[sortedGFs['userId'] == userid]
else:
result = pd.concat([result, sortedGFs[sortedGFs['userId'] == userid]])
#print(sortedGFs[sortedGFs['userId'] == userid])
result
len(gform) - len(result)
len(gform[gform[QTemporality] == answerTemporalities[2]])
len(gform[gform[QTemporality] == answerTemporalities[0]])
len(gform[gform[QTemporality] == answerTemporalities[1]])
gform.loc[:, [QPlayed, 'userId', QTemporality, QTimestamp]].sort_values(by = ['userId', QTimestamp])
rmdf1522['userTime'].min(),gform[QTimestamp].min(),rmdf1522['userTime'].min().floor('d') == gform[QTimestamp].min().floor('d')
# code to find special userIds
enSpeakers = gform[gform[QLanguage]==enLanguageID]
frSpeakers = gform[gform[QLanguage]==frLanguageID]
sortedGFs = gform.loc[:, ['userId', QTemporality, QTimestamp, QLanguage]].sort_values(by = ['userId', QTimestamp])
foundUserIDThatDidNotAnswer = False
foundUserID1AnswerEN = False
foundUserIDAnswersEN = False
foundUserID1ScoreEN = False
foundUserIDScoresEN = False
foundUserID1AnswerFR = False
foundUserIDAnswersFR = False
foundUserID1ScoreFR = False
foundUserIDScoresFR = False
foundUserIDAnswersENFR = False
maxuserIdIndex = len(sortedGFs['userId'])
userIdIndex = 0
userIdIntProgress = IntProgress(
value=0,
min=0,
max=maxuserIdIndex,
description='userIdIndex:'
)
display(userIdIntProgress)
userIdText = Text('')
display(userIdText)
# survey1522startDate = Timestamp('2018-03-24 12:00:00.000000+0000', tz='UTC')
survey1522startDate = gform[QTimestamp].min().floor('d')
if (rmdf1522['userTime'].min().floor('d') != gform[QTimestamp].min().floor('d')):
print("rmdf and gform first date don't match")
for userId in rmdf1522[rmdf1522['userTime'] >= survey1522startDate]['userId']:
if userId not in sortedGFs['userId'].values:
print("userIdThatDidNotAnswer = '" + userId + "'")
foundUserIDThatDidNotAnswer = True
break
for userId in sortedGFs['userId']:
userIdIndex += 1
userIdIntProgress.value = userIdIndex
userIdText.value = userId
answers = sortedGFs[sortedGFs['userId'] == userId]
if not foundUserID1AnswerEN and (len(answers) == 1) and (answers[QLanguage].unique() == [enLanguageID]):
print("userId1AnswerEN = '" + userId + "'")
print("userId1ScoreEN = '" + userId + "'")
foundUserID1AnswerEN = True
foundUserID1ScoreEN = True
if not foundUserIDAnswersEN and (len(answers) >= 2) and (answers[QLanguage].unique() == [enLanguageID]):
print("userIdAnswersEN = '" + userId + "'")
print("userIdScoresEN = '" + userId + "'")
foundUserIDAnswersEN = True
foundUserIDScoresEN = True
# if not foundUserID1ScoreEN and :
# print("userId1ScoreEN = '" + userId + "'")
# foundUserID1ScoreEN = True
# if not foundUserIDScoresEN and :
# print("userIdScoresEN = '" + userId + "'")
# foundUserIDScoresEN = True
if not foundUserID1AnswerFR and (len(answers) == 1) and (answers[QLanguage].unique() == [frLanguageID]):
print("userId1AnswerFR = '" + userId + "'")
print("userId1ScoreFR = '" + userId + "'")
foundUserID1AnswerFR = True
foundUserID1ScoreFR = True
if not foundUserIDAnswersFR and (len(answers) >= 2) and (answers[QLanguage].unique() == [frLanguageID]):
print("userIdAnswersFR = '" + userId + "'")
print("userIdScoresFR = '" + userId + "'")
foundUserIDAnswersFR = True
foundUserIDScoresFR = True
# if not foundUserID1ScoreFR and :
# print("userId1ScoreFR = '" + userId + "'")
# foundUserID1ScoreFR = True
# if not foundUserIDScoresFR and :
# print("userIdScoresFR = '" + userId + "'")
# foundUserIDScoresFR = True
if not foundUserIDAnswersENFR and (len(answers) >= 2) and (enLanguageID in answers[QLanguage].unique()) and (frLanguageID in answers[QLanguage].unique()):
print("userIdAnswersENFR = '" + userId + "'")
foundUserIDAnswersENFR = True
answers
answerDate = gform[gform['userId'] == '51f1ef77-ec48-4976-be1f-89b7cbd1afab'][QTimestamp][0]
answerDate
getEventCountRatios(answerDate, '51f1ef77-ec48-4976-be1f-89b7cbd1afab')
allEvents = rmdf1522[rmdf1522['userId']=='51f1ef77-ec48-4976-be1f-89b7cbd1afab']
allEventsCount = len(allEvents)
eventsBeforeRatio = len(allEvents[allEvents['userTime'] < answerDate])/allEventsCount
eventsAfterRatio = len(allEvents[allEvents['userTime'] > answerDate])/allEventsCount
result = [eventsBeforeRatio, eventsAfterRatio]
result
[answerDate, allEvents.loc[:, ['userTime']].iloc[0], allEvents.loc[:, ['userTime']].iloc[-1]]
gform[gform['userId'] == '51f1ef77-ec48-4976-be1f-89b7cbd1afab'][QTemporality].iloc[0]
userId = '51f1ef77-ec48-4976-be1f-89b7cbd1afab'
answerDate = gform[gform['userId'] == userId][QTimestamp][0]
[eventsBeforeRatio, eventsAfterRatio] = getEventCountRatios(answerDate, userId)
[eventsBeforeRatio, eventsAfterRatio]
# code to find currently-sorted-as-posttest answers that have nan answers to content questions
QQ = QBioBricksDevicesComposition
for answerIndex in gform.index:
if gform.loc[answerIndex, QTemporality] == answerTemporalities[1]:
if pd.isnull(gform.loc[answerIndex,QQ]):
print(answerIndex)
# code to find which answers have both already played but also filled in profile questions
answersPlayedButProfile = []
for answerIndex in gform.index:
if gform.loc[answerIndex, QTemporality] == answerTemporalities[1]:
        if not pd.isnull(gform.loc[answerIndex, QAge]):
answersPlayedButProfile.append(answerIndex)
gform.loc[answersPlayedButProfile, QPlayed]
userId = gform.loc[54, 'userId']
thisUserIdsAnswers = gform[gform['userId'] == userId]
thisUserIdsAnswers[thisUserIdsAnswers[QTemporality] == answerTemporalities[0]][QAge].values[0]
gform[gform[QTemporality] == answerTemporalities[0]][QAge].unique()
# pretest ages
ages = gform[(gform[QTemporality] == answerTemporalities[0])][QAge].unique()
ages.sort()
ages
# the answers that are a problem for the analysis
AUnclassifiable = 'I played recently on an other computer'
#_gformDF[(_gformDF[QTemporality] == answerTemporalities[1]) & (_gformDF[QAge].apply(type) == str)]
gform[gform[QPlayed] == AUnclassifiable]
# various tests around setPosttestsProfileInfo
len(_gformDF[pd.isnull(_gformDF[QAge])])/len(_gformDF)
_gformDF[pd.isnull(_gformDF[QAge])][QTemporality].unique()
_gformDF[_gformDF[QTemporality] == answerTemporalities[1]][QAge].unique()
nullAge = _gformDF[pd.isnull(_gformDF[QAge])]['userId']
nullAge = _gformDF[_gformDF['userId'].isin(nullAge)]
len(nullAge)
nullAge.sort_values(QPlayed)
dates = np.unique(nullAge[QTimestamp].apply(pd.Timestamp.date).values)
dates.sort()
dates
nullAge[QTimestamp].apply(pd.Timestamp.date).value_counts().sort_index()
len(nullAge['userId'].unique())/len(gform['userId'].unique())
pretestIds = _gformDF[_gformDF[QTemporality] == answerTemporalities[0]]['userId']
posttestIds = _gformDF[_gformDF[QTemporality] == answerTemporalities[1]]['userId']
posttestsWithoutPretests = posttestIds[~posttestIds.isin(pretestIds)]
pretestsWithoutPosttests = pretestIds[~pretestIds.isin(posttestIds)]
len(posttestsWithoutPretests), len(posttestIds), len(pretestsWithoutPosttests), len(pretestIds)
intersectionIds1 = pretestIds[pretestIds.isin(posttestIds)]
intersectionIds2 = posttestIds[posttestIds.isin(pretestIds)]
_gformDF.loc[intersectionIds2.index]
len(gform) - len(getWithoutIncompleteAnswers())
_gformDF2.iloc[_gformDF2.index[pd.isnull(_gformDF2[_gformDF2.columns[survey1522DF[profileColumn]]].T).any()]]
withoutIncompleteAnswers = getWithoutIncompleteAnswers()
len(gform) - len(withoutIncompleteAnswers)
len(getWithoutIncompleteAnswers())
# tests for getPerfectPretestPostestPairs
'29b739fc-4f9f-4f5e-bfee-8ba12de4b7fa' in testUsers
_gformDF3 = getWithoutIncompleteAnswers(gform)
sortedPosttests = _gformDF3[_gformDF3[QTemporality] == answerTemporalities[1]]['userId'].value_counts()
posttestDuplicatesUserIds = sortedPosttests[sortedPosttests > 1].index
_gformDF4 = _gformDF3[_gformDF3['userId'].isin(posttestDuplicatesUserIds)].drop_duplicates(subset=['userId', QTemporality], keep='first')
_gformDF5 = _gformDF3.sort_values(['userId', QTimestamp]).drop_duplicates(subset=['userId', QTemporality], keep='first')
len(gform),len(_gformDF3),len(_gformDF4),len(_gformDF5)
gform[gform['userId'].isin(posttestDuplicatesUserIds)][[QTimestamp, 'userId', QTemporality]].sort_values(['userId', QTimestamp])
gform.iloc[getPosttestsWithoutPretests(gform)][[QTimestamp, 'userId', QTemporality]].sort_values(['userId', QTimestamp])
# tests for getPerfectPretestPostestPairs
_gformDF = gform
_gformDF2 = getWithoutIncompleteAnswers(_gformDF)
vc = _gformDF2['userId'].value_counts()
vc[vc == 1]
# remove ulterior pretests and posttests
_gformDF3 = _gformDF2.sort_values(['userId', QTimestamp]).drop_duplicates(subset=['userId', QTemporality], keep='first')
vc = _gformDF3['userId'].value_counts()
vc[vc == 1]
# only keep pretests that have matching posttests
posttestIds = _gformDF3[_gformDF3[QTemporality] == answerTemporalities[1]]['userId']
_gformDF4 = _gformDF3.drop(_gformDF3.index[~_gformDF3['userId'].isin(posttestIds)])
vc = _gformDF4['userId'].value_counts()
vc[vc == 1]
vc
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 'Google form analysis' functions checks
Step2: hasAnswered
Step3: getAnswers
Step4: getCorrections
Step5: getScore
Step6: code to explore scores
Step7: getValidatedCheckpoints
Step8: getNonValidated
Step9: getNonValidatedCheckpoints
Step10: getValidatedCheckpointsCounts
Step11: getNonValidatedCheckpointsCounts
Step12: getAllAnswerRows
Step13: getPercentCorrectPerColumn
Step14: Google form loading
Step15: Selection of a question
Step16: Selection of a user's answers
Step17: getAllRespondersGFormGUID tinkering
Step18: getRandomGFormGUID tinkering
Step19: getAnswers tinkering
Step20: answer selection
Step21: checking answers
Step22: getTemporality tinkering
Step23: getTemporality tinkering
Step24: getTestAnswers tinkering
Step25: getCorrections tinkering
Step26: getCorrections extensions tinkering
Step27: getBinarizedCorrections tinkering
Step28: getBinarized tinkering
Step29: getAllBinarized tinkering
Step30: plotCorrelationMatrix tinkering
Step31: data = transposed[[0,1]]
Step32: getCrossCorrectAnswers tinkering
Step33: after
Step34: getScore tinkering
Step35: comparison of checkpoints completion and answers
Step36: getNonValidated tinkering
Step37: getAllAnswerRows tinkering
Step38: getPercentCorrectPerColumn tinkering
Step39: getPercentCorrectKnowingAnswer tinkering
Step40: tests on all user Ids, including those who answered more than once
Step41: answers submitted through time
Step42: add language column
Step43: concatenate
Step44: getEventCountRatios tinkering
Step45: setAnswerTemporalities2 tinkering / Temporalities analysis
Step46: question types
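Below is a minimal sketch (toy data, question names assumed) of the binarize-then-correlate pipeline that the getAllBinarized / plotCorrelationMatrix steps above tinker with: answers become a 0/1 correctness matrix whose question-question correlations are then inspected.
import numpy as np
import pandas as pd
rng = np.random.RandomState(0)
# rows = respondents, columns = questions; 1 = correct answer, 0 = incorrect
toy = pd.DataFrame(rng.randint(0, 2, size=(30, 5)),
                   columns=['Q%d' % i for i in range(1, 6)])
correlation = toy.astype(float).corr()
print(correlation.round(2))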
|
3,723
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
from cvxopt import solvers, matrix
m = matrix( [ [2.0, 1.1] ] ) # use floats, not integers:
# cvxopt does not do implicit conversions
t = m.T # the transpose
t.size # displays the dimensions of the matrix
from cvxopt import solvers, matrix, spdiag, log
def acent(A, b):
m, n = A.size
def F(x=None, z=None):
if x is None:
            # the algorithm works iteratively;
            # an initial x must be chosen, which is what we do here
            return 0, matrix(1.0, (n,1))
        if min(x) <= 0.0:
            return None # infeasible case
        # here begins the code that defines what one iteration is
f = -sum(log(x))
Df = -(x**-1).T
if z is None: return f, Df
H = spdiag(z[0] * x**(-2))
return f, Df, H
return solvers.cp(F, A=A, b=b)['x']
A = matrix ( [[1.0,2.0]] ).T
b = matrix ( [[ 1.0 ]] )
print(acent(A,b))
# there is a way to avoid printing the logs (handy when running
# a large number of optimizations)
from cvxopt import solvers
solvers.options['show_progress'] = False
print(acent(A,b))
solvers.options['show_progress'] = True
import cvxopt
m = cvxopt.matrix( [[ 0, 1.5], [ 4.5, -6] ] )
print(m)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python offers modules that make it possible to solve constrained optimization problems, and it is not strictly necessary to know the theory behind the solver algorithms to use them. In this session we will see how, even though understanding how to use a function from a module such as cvxopt sometimes requires a bit of time and reading.
Step2: The cvxopt documentation is sometimes not very explicit. Do not hesitate to look at the examples first, and to read carefully the lines describing the values each parameter of the function must take. The most relevant example for the case at hand is this one (taken from the page 'problems with nonlinear objectives')
Step3: This example solves the following minimization problem
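Reconstructed from the acent code above (the formula itself is inferred from the code, not quoted from the source): this is the analytic centering problem
$$\min_x \; -\sum_{i=1}^n \log x_i \quad \text{s.t.} \quad Ax = b,\; x > 0,$$
whose gradient $-x^{-1}$ and (z-scaled) Hessian $\mathrm{diag}(x^{-2})$ are exactly what the function F returns to solvers.cp.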
|
3,724
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tables
import matplotlib.pyplot as plt
import seaborn as sns
import pybromo as pbm
print('Numpy version:', np.__version__)
print('PyTables version:', tables.__version__)
print('PyBroMo version:', pbm.__version__)
S = pbm.ParticlesSimulation.from_datafile('016', mode='w')
def em_rates_from_E(em_rate_tot, E_values):
E_values = np.asarray(E_values)
em_rates_a = E_values * em_rate_tot
em_rates_d = em_rate_tot - em_rates_a
k_values = E_values/(1 - E_values)
assert np.allclose((em_rates_a/em_rates_d), k_values)
em_rates = np.hstack([em_rates_a, em_rates_d])
return em_rates
em_rate_tot = 200e3
E_list = np.array([0.01, 0.02, 0.05, 0.1, 0.2, 0.4])
em_rate_list = em_rates_from_E(em_rate_tot, E_list)
em_rate_list
# Get the random state at the end of the diffusion simulation
saved_rs_state = S.traj_group._v_attrs['last_random_state']
pbm.hash_(saved_rs_state)
em_rate_list
rs = np.random.RandomState()
rs.set_state(saved_rs_state)
%%timeit -n1 -r1
for em_rate in em_rate_list:
print(' Emission rate: ', em_rate, flush=True)
S.simulate_timestamps_mix(max_rates=(em_rate,), populations=(slice(0, 20),),
bg_rate=1e3, rs=rs)
%%timeit -n1 -r1
for em_rate in em_rate_list:
print(' Emission rate: ', em_rate, flush=True)
S.simulate_timestamps_mix(max_rates=(em_rate,), populations=(slice(0, 20),),
bg_rate=4e3, rs=rs)
for k in S.ts_store.h5file.root.timestamps._v_children.keys():
if not k.endswith('_par'):
print(k)
ts, ts_par = S.get_timestamps_part('Pop1_P20_Pstart0_max_rate198000cps_BG4000cps_t_1s_rs_8798a6')
ts[:]
bins = np.arange(0, 1, 1e-3)
plt.hist(ts*ts.attrs['clk_p'], bins=bins, histtype='step');
group = '/timestamps'
print('Nodes in in %s:\n' % group)
print(S.ts_store.h5file.get_node(group))
for node in S.ts_store.h5file.get_node(group)._f_list_nodes():
print('\t%s' % node.name)
#print('\t %s' % node.title)
[t for t in S.timestamp_names if 'BG4000cps' in t]
S.ts_store.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Timestamps simulation
Step2: Simulation of the series of emission rates
Step3: Simulate timestamps for background = 1kcps
Step4: Simulate timestamps for background = 4kcps
Step5: Verify the simulation
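For reference, this is what em_rates_from_E in the code above computes (restated from the code, not quoted from the source):
$$k_A = E\,k_{tot}, \qquad k_D = (1 - E)\,k_{tot}, \qquad k_A / k_D = E / (1 - E),$$
so each FRET efficiency in E_list yields one acceptor/donor pair of emission rates.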
|
3,725
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erfc
def newfig(title='?', xlabel='?', ylabel='?', xlim=None, ylim=None,
xscale='linear', yscale='linear', size_inches=(14, 8)):
'''Setup a new axis for plotting'''
fig, ax = plt.subplots()
fig.set_size_inches(size_inches)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
if xlim is not None: ax.set_xlim(xlim)
if ylim is not None: ax.set_ylim(ylim)
ax.grid(True)
return ax
L = 150 # m (strip wirdth)
x = np.linspace(-L/2, L/2, 201) # points, taking left at zero.
kD = 600 # m2/d
S = 0.1 # [-]
a = 1.0 # m, sudden head change at x = -L/2
times = np.linspace(0, 0.5, 11)[1:] # d
ax = newfig('Decay from initial head $a$ to 0 at $x = -L/2$ and at $x = L/2$',
'$x$ [m], $ -L/2 < x < L/2 $', 'head change [m]', xlim=(-L/2, L/2))
for t in times:
rt = np.sqrt(S / (4 * kD * t))
s = np.zeros_like(x) + a # intiial head
for i in range(20):
s -= a * erfc(((2 * i + 0.5) * L + x) * rt)
s -= a * erfc(((2 * i + 0.5) * L - x) * rt)
if i > 0:
s += a * erfc(((2 * i - 0.5) * L - x) * rt)
s += a * erfc(((2 * i - 0.5) * L + x) * rt)
ax.plot(x, s, label=f't = {t:5.2f} d')
ax.legend()
b =L/2
ax = newfig('Symmertric solution for head decay in strip', 'x [m]', 'head [h]', xlim=(-b, b))
for t in times:
h = np.zeros_like(x)
for j in range(1,20):
h += a * 4 / np.pi * ((-1)**(j-1) / (2 * j - 1) *
np.cos((2 * j - 1) * np.pi / 2 * x / b) *
np.exp(- (2 * j - 1)**2 * (np.pi / 2)**2 * kD /(b**2 * S) * t))
ax.plot(x, h, label=f't={t:.1f}')
ax.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convenience function for setting up a graph
Step2: Superposition with $\mathtt{erfc}()$ terms subtracted from the initial head $a$
Step3: Same thing, now using the analytical (Fourier-series) solution with the cosine and exponential terms
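Both cells implement the same problem (head $a$ initially, 0 at $x = \pm L/2$); restated from the code (not quoted from the source), the series solution is
$$h(x,t) = \frac{4a}{\pi}\sum_{j=1}^{\infty}\frac{(-1)^{j-1}}{2j-1}\cos\!\left(\frac{(2j-1)\pi x}{2b}\right)\exp\!\left(-\left(\frac{(2j-1)\pi}{2b}\right)^2 \frac{kD}{S}\,t\right), \quad b = L/2,$$
while the first cell builds the same head change by superposing mirror-image $\mathrm{erfc}$ terms with arguments $\left((2i \pm \tfrac{1}{2})L \pm x\right)\sqrt{S/(4\,kD\,t)}$.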
|
3,726
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import mne
from mne.datasets.sample import data_path
fname = op.join(data_path(), 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(fname).crop(0, 10)
original_level = mne.get_config('MNE_LOGGING_LEVEL', 'INFO')
print(mne.get_config_path())
print(mne.get_config())
mne.set_config('MNE_LOGGING_LEVEL', 'INFO')
print(mne.get_config(key='MNE_LOGGING_LEVEL'))
mne.set_log_level('WARNING')
print(mne.get_config(key='MNE_LOGGING_LEVEL'))
cov = mne.compute_raw_covariance(raw)
cov = mne.compute_raw_covariance(raw, verbose=True)
mne.set_config('MNE_LOGGING_LEVEL', original_level)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MNE-python stores configurations to a folder called .mne in the user's
Step2: These configurations include information like sample data paths and plotter
Step3: We see fields like "MNE_DATASETS_SAMPLE_PATH". As the name suggests, this is
Step4: The default value is now set to INFO. This level will now be used by default
Step5: Notice how the value in the config file was not changed. Logging level of
Step6: Nothing. This means that no warnings were emitted during the computation. If
Step7: As you see there is some info about what the function is doing. The logging
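A minimal sketch (using only the functions already shown above): the global level can also be overridden for a single call through the verbose keyword, e.g.
cov = mne.compute_raw_covariance(raw, verbose='ERROR')  # silence this call only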
|
3,727
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
rnd = np.random.RandomState(seed=42)
n_data = 16 # number of data points
a_true = 1.255 # randomly chosen truth
b_true = 4.507
# Fill in your solution here
# (added sketch: the original x values are elided; assume uniformly random x)
x = np.sort(rnd.uniform(0, 2., n_data))
# evaluate the true model at the given x values
y = a_true*x + b_true
# Heteroscedastic Gaussian uncertainties only in y direction
y_err = rnd.uniform(0.1, 0.2, size=n_data) # randomly generate uncertainty for each datum
y = y + rnd.normal(0, y_err) # add noise to y data
plt.errorbar(x, y, y_err, marker='o', linestyle='none')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.tight_layout()
# FILL IN THESE FUNCTIONS:
# (filled in as a sketch -- the asserts below pin down the intended behavior)
def line_model(pars, x):
    """Evaluate the straight line a*x + b for pars = (a, b)."""
    a, b = pars
    return a * np.asarray(x) + b
def weighted_absolute_deviation(pars, x, y, y_err):
    """L1 objective: sum of absolute residuals weighted by the uncertainties."""
    return np.sum(np.abs(y - line_model(pars, x)) / y_err)
def weighted_squared_deviation(pars, x, y, y_err):
    """Chi-squared: sum of squared, uncertainty-weighted residuals."""
    return np.sum((y - line_model(pars, x))**2 / y_err**2)
_pars = [1., -10.]
_x = np.arange(16)
_y = _x
_yerr = np.ones_like(_x)
truth = np.array([-10., -9., -8., -7., -6., -5., -4., -3., -2., -1., 0., 1., 2., 3., 4., 5.])
assert np.allclose(line_model(_pars, _x), truth), 'Error in line_model() function!'
assert weighted_absolute_deviation(_pars, _x, _y, _yerr) == 160., 'Error in weighted_absolute_deviation() function!'
assert weighted_squared_deviation(_pars, _x, _y, _yerr) == 1600., 'Error in weighted_squared_deviation() function!'
# make a 256x256 grid of parameter values centered on the true values
a_grid = np.linspace(a_true-2., a_true+2, 256)
b_grid = np.linspace(b_true-2., b_true+2, 256)
a_grid,b_grid = np.meshgrid(a_grid, b_grid)
ab_grid = np.vstack((a_grid.ravel(), b_grid.ravel())).T
fig,axes = plt.subplots(1, 2, figsize=(9,5.1), sharex=True, sharey=True)
for i,func in enumerate([weighted_absolute_deviation, weighted_squared_deviation]):
func_vals = np.zeros(ab_grid.shape[0])
for j,pars in enumerate(ab_grid):
func_vals[j] = func(pars, x, y, y_err)
axes[i].pcolormesh(a_grid, b_grid, func_vals.reshape(a_grid.shape),
cmap='Blues', vmin=func_vals.min(), vmax=func_vals.min()+256) # arbitrary scale
axes[i].set_xlabel('$a$')
# plot the truth
axes[i].plot(a_true, b_true, marker='o', zorder=10, color='#de2d26')
axes[i].axis('tight')
axes[i].set_title(func.__name__, fontsize=14)
axes[0].set_ylabel('$b$')
fig.tight_layout()
# some tricks to get info we need to plot an ellipse, aligned with
# the eigenvectors of the covariance matrix
eigval,eigvec = np.linalg.eig(pars_Cov)
angle = np.degrees(np.arctan2(eigvec[1,0], eigvec[0,0]))
w,h = 2*np.sqrt(eigval)
from matplotlib.patches import Ellipse
fig,ax = plt.subplots(1, 1, figsize=(5,5))
for n in [1,2]:
ax.add_patch(Ellipse(best_pars, width=n*w, height=n*h, angle=angle,
fill=False, linewidth=3-n, edgecolor='#555555',
label=r'{}$\sigma$'.format(n)))
ax.plot(b_true, a_true, marker='o', zorder=10, label='truth')
ax.plot(best_pars[0], best_pars[1], marker='o', zorder=9, label='estimate')
ax.set_xlabel('$b$')
ax.set_ylabel('$a$')
ax.legend(loc='best')
fig.tight_layout()
class StraightLineModel(object):
def __init__(self, x, y, y_err):
"""We store the data as attributes of the object so we don't have to
keep passing it in to the methods that compute the probabilities."""
self.x = np.asarray(x)
self.y = np.asarray(y)
self.y_err = np.asarray(y_err)
def ln_likelihood(self, pars):
"""We don't need to pass in the data because we can access it from the
attributes. This is basically the same as the weighted squared
deviation function, but includes the constant normalizations for the
Gaussian likelihood."""
N = len(self.y)
dy = self.y - line_model(pars, self.x)
ivar = 1 / self.y_err**2 # inverse-variance
return -0.5 * (N*np.log(2*np.pi) + np.sum(2*np.log(self.y_err)) + np.sum(dy**2 * ivar))
def ln_prior(self, pars):
"""The prior only depends on the parameters, so we don't need to touch
the data at all. We're going to implement a flat (uniform) prior
over the ranges:
a : [0, 100]
b : [-50, 50]
"""
a, b = pars # unpack parameters
ln_prior_val = 0. # we'll add to this
if a < 0 or a > 100.:
return -np.inf
else:
ln_prior_val += np.log(1E-2) # normalization, log(1/100)
if b < -50 or b > 50.:
return -np.inf
else:
ln_prior_val += np.log(1E-2) # normalization, log(1/100)
return ln_prior_val
def ln_posterior(self, pars):
"""Up to a normalization constant, the log of the posterior pdf is just
the sum of the log likelihood plus the log prior."""
lnp = self.ln_prior(pars)
if np.isinf(lnp): # short-circuit if the prior is infinite (don't bother computing likelihood)
return lnp
lnL = self.ln_likelihood(pars)
lnprob = lnp + lnL
if np.isnan(lnprob):
return -np.inf
return lnprob
def __call__(self, pars):
return self.ln_posterior(pars)
model = StraightLineModel(x, y, y_err)
def evaluate_on_grid(func, a_grid, b_grid, args=()):
a_grid,b_grid = np.meshgrid(a_grid, b_grid)
ab_grid = np.vstack((a_grid.ravel(), b_grid.ravel())).T
func_vals = np.zeros(ab_grid.shape[0])
for j,pars in enumerate(ab_grid):
func_vals[j] = func(pars, *args)
return func_vals.reshape(a_grid.shape)
fig,axes = plt.subplots(1, 3, figsize=(14,5.1), sharex=True, sharey=True)
# make a 256x256 grid of parameter values centered on the true values
a_grid = np.linspace(a_true-5., a_true+5, 256)
b_grid = np.linspace(b_true-5., b_true+5, 256)
ln_prior_vals = evaluate_on_grid(model.ln_prior, a_grid, b_grid)
ln_like_vals = evaluate_on_grid(model.ln_likelihood, a_grid, b_grid)
ln_post_vals = evaluate_on_grid(model.ln_posterior, a_grid, b_grid)
for i,vals in enumerate([ln_prior_vals, ln_like_vals, ln_post_vals]):
axes[i].pcolormesh(a_grid, b_grid, vals,
cmap='Blues', vmin=vals.max()-1024, vmax=vals.max()) # arbitrary scale
axes[0].set_title('log-prior', fontsize=20)
axes[1].set_title('log-likelihood', fontsize=20)
axes[2].set_title('log-posterior', fontsize=20)
for ax in axes:
ax.set_xlabel('$a$')
# plot the truth
ax.plot(a_true, b_true, marker='o', zorder=10, color='#de2d26')
ax.axis('tight')
axes[0].set_ylabel('$b$')
fig.tight_layout()
class StraightLineModelGaussianPrior(StraightLineModel): # verbose names are a good thing!
def ln_prior(self, pars):
a, b = pars # unpack parameters
ln_prior_val = 0. # we'll add to this
# prior on a is a Gaussian with mean, stddev = (3, 0.05)
ln_prior_val += -0.5*(a - 3.)**2/0.05**2 # this is not normalized properly, but that's ok
# prior on b is a Gaussian with mean, stddev = (5.5, 0.05)
ln_prior_val += -0.5*(b - 5.5)**2/0.05**2 # this is not normalized properly, but that's ok
return ln_prior_val
model_Gprior = StraightLineModelGaussianPrior(x, y, y_err)
fig,axes = plt.subplots(1, 3, figsize=(14,5.1), sharex=True, sharey=True)
ln_prior_vals2 = evaluate_on_grid(model_Gprior.ln_prior, a_grid, b_grid)
ln_like_vals2 = evaluate_on_grid(model_Gprior.ln_likelihood, a_grid, b_grid)
ln_post_vals2 = evaluate_on_grid(model_Gprior.ln_posterior, a_grid, b_grid)
for i,vals in enumerate([ln_prior_vals2, ln_like_vals2, ln_post_vals2]):
axes[i].pcolormesh(a_grid, b_grid, vals,
cmap='Blues', vmin=vals.max()-1024, vmax=vals.max()) # arbitrary scale
axes[0].set_title('log-prior', fontsize=20)
axes[1].set_title('log-likelihood', fontsize=20)
axes[2].set_title('log-posterior', fontsize=20)
for ax in axes:
ax.set_xlabel('$a$')
# plot the truth
ax.plot(a_true, b_true, marker='o', zorder=10, color='#de2d26')
ax.axis('tight')
axes[0].set_ylabel('$b$')
fig.tight_layout()
def sample_proposal(*sigmas):
return np.random.normal(0., sigmas)
def run_metropolis_hastings(p0, n_steps, model, proposal_sigmas):
"""Run a Metropolis-Hastings MCMC sampler to generate samples from the input
log-posterior function, starting from some initial parameter vector.

Parameters
----------
p0 : iterable
    Initial parameter vector.
n_steps : int
    Number of steps to run the sampler for.
model : StraightLineModel instance (or subclass)
    A callable object that takes a parameter vector and computes
    the log of the posterior pdf.
proposal_sigmas : list, array
    A list of standard-deviations passed to the sample_proposal
    function. These are like step sizes in each of the parameters.
"""
p0 = np.array(p0)
if len(proposal_sigmas) != len(p0):
raise ValueError("Proposal distribution should have same shape as parameter vector.")
# the objects we'll fill and return:
chain = np.zeros((n_steps, len(p0))) # parameter values at each step
ln_probs = np.zeros(n_steps) # log-probability values at each step
# we'll keep track of how many steps we accept to compute the acceptance fraction
n_accept = 0
# evaluate the log-posterior at the initial position and store starting position in chain
ln_probs[0] = model(p0)
chain[0] = p0
# loop through the number of steps requested and run MCMC
for i in range(1,n_steps):
# proposed new parameters
step = sample_proposal(*proposal_sigmas)
new_p = chain[i-1] + step
# compute log-posterior at new parameter values
new_ln_prob = model(new_p)
# log of the ratio of the new log-posterior to the previous log-posterior value
ln_prob_ratio = new_ln_prob - ln_probs[i-1]
if (ln_prob_ratio > 0) or (ln_prob_ratio > np.log(np.random.uniform())):
chain[i] = new_p
ln_probs[i] = new_ln_prob
n_accept += 1
else:
chain[i] = chain[i-1]
ln_probs[i] = ln_probs[i-1]
acc_frac = n_accept / n_steps
return chain, ln_probs, acc_frac
p0 = [6.,6.]
chain,_,acc_frac = run_metropolis_hastings(p0, n_steps=8192, model=model,
proposal_sigmas=[0.05,0.05])
print("Acceptance fraction: {:.1%}".format(acc_frac))
fig,ax = plt.subplots(1, 1, figsize=(5,5))
ax.pcolormesh(a_grid, b_grid, ln_post_vals,
cmap='Blues', vmin=ln_post_vals.max()-128, vmax=ln_post_vals.max()) # arbitrary scale
ax.axis('tight')
fig.tight_layout()
ax.plot(a_true, b_true, marker='o', zorder=10, color='#de2d26')
ax.plot(chain[:512,0], chain[:512,1], marker='', color='k', linewidth=1.)
ax.set_xlabel('$a$')
ax.set_ylabel('$b$')
fig,axes = plt.subplots(len(p0), 1, figsize=(5,7), sharex=True)
for i in range(len(p0)):
axes[i].plot(chain[:,i], marker='', drawstyle='steps')
axes[0].axhline(a_true, color='r', label='true')
axes[0].legend(loc='best')
axes[0].set_ylabel('$a$')
axes[1].axhline(b_true, color='r')
axes[1].set_ylabel('$b$')
fig.tight_layout()
good_samples = chain[2000::8]
good_samples.shape
low,med,hi = np.percentile(good_samples, [16, 50, 84], axis=0)
upper, lower = hi-med, med-low
disp_str = ""
for i,name in enumerate(['a', 'b']):
fmt_str = '{name}={val:.2f}^{{+{plus:.2f}}}_{{-{minus:.2f}}}'
disp_str += fmt_str.format(name=name, val=med[i], plus=upper[i], minus=lower[i])
disp_str += r'\quad '
from IPython import display
disp_str = "${}$".format(disp_str)
display.Latex(data=disp_str)
a_true, b_true
plt.figure(figsize=(6,5))
plt.errorbar(x, y, y_err, linestyle='none', marker='o', ecolor='#666666')
x_grid = np.linspace(x.min()-0.1, x.max()+0.1, 128)
for pars in good_samples[:128]: # only plot 128 samples
plt.plot(x_grid, line_model(pars, x_grid),
marker='', linestyle='-', color='#3182bd', alpha=0.1, zorder=-10)
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.tight_layout()
# uncomment and run this line if the import fails:
# !source activate statsseminar; pip install corner
import corner
fig = corner.corner(chain[2000:], bins=32, labels=['$a$', '$b$'], truths=[a_true, b_true])
V_true = 0.5**2
n_data = 42
# we'll keep the same parameters for the line as we used above
x = rnd.uniform(0, 2., n_data)
x.sort() # sort the values in place
y = a_true*x + b_true
# Heteroscedastic Gaussian uncertainties only in y direction
y_err = rnd.uniform(0.1, 0.2, size=n_data) # randomly generate uncertainty for each datum
# add Gaussian intrinsic width
y = rnd.normal(y, np.sqrt(y_err**2 + V_true)) # re-sample y data with noise and intrinsic scatter
plt.errorbar(x, y, y_err, linestyle='none', marker='o', ecolor='#666666')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.tight_layout()
X = np.vander(x, N=2, increasing=True)
Cov = np.diag(y_err**2)
Cinv = np.linalg.inv(Cov)
best_pars = np.linalg.inv(X.T @ Cinv @ X) @ (X.T @ Cinv @ y)
pars_Cov = np.linalg.inv(X.T @ Cinv @ X)
plt.errorbar(x, y, y_err, linestyle='none', marker='o', ecolor='#666666')
x_grid = np.linspace(x.min()-0.1, x.max()+0.1, 128)
plt.plot(x_grid, line_model(best_pars[::-1], x_grid), marker='', linestyle='-', label='best-fit line')
plt.plot(x_grid, line_model([a_true, b_true], x_grid), marker='', linestyle='-', label='true line')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.legend(loc='best')
plt.tight_layout()
pars_Cov
class StraightLineIntrinsicScatterModel(StraightLineModel):
def ln_prior(self, pars):
"""The prior only depends on the parameters"""
a, b, lnV = pars
# flat priors on a, b, lnV
if a < -10 or a > 10 or b < -100. or b > 100. or lnV < -10. or lnV > 10.:
return -np.inf
# this is only valid up to a numerical constant
return 0.
def ln_likelihood(self, pars):
"""The likelihood function evaluation requires a particular set of model parameters and the data"""
a,b,lnV = pars
V = np.exp(lnV)
N = len(self.y)
dy = self.y - line_model([a, b], self.x)
ivar = 1 / (self.y_err**2 + V) # inverse-variance now includes intrinsic scatter
return -0.5 * (N*np.log(2*np.pi) - np.sum(np.log(ivar)) + np.sum(dy**2 * ivar))
scatter_model = StraightLineIntrinsicScatterModel(x, y, y_err)
from scipy.optimize import minimize  # needed for the numerical optimization below
x0 = [5., 5., 0.] # starting guess for the optimizer
# we have to minimize the negative log-likelihood to maximize the likelihood
result_ml_scatter = minimize(lambda *args: -scatter_model.ln_likelihood(*args),
                             x0=x0, method='BFGS')
result_ml_scatter
plt.errorbar(x, y, y_err, linestyle='none', marker='o', ecolor='#666666')
x_grid = np.linspace(x.min()-0.1, x.max()+0.1, 128)
plt.plot(x_grid, line_model(result_ml_scatter.x[:2], x_grid), marker='', linestyle='-', label='best-fit line')
plt.plot(x_grid, line_model([a_true, b_true], x_grid), marker='', linestyle='-', label='true line')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.legend(loc='best')
plt.tight_layout()
V_true, np.exp(result_ml_scatter.x[2])
p0 = [6., 6., -1.]
chain,_,acc_frac = run_metropolis_hastings(p0, n_steps=2**16, model=scatter_model,
proposal_sigmas=[0.15,0.15,0.2])
acc_frac
fig,axes = plt.subplots(len(p0), 1, figsize=(5,7), sharex=True)
for i in range(len(p0)):
axes[i].plot(chain[:,i], marker='', drawstyle='steps')
axes[0].axhline(a_true, color='r', label='true')
axes[0].legend(loc='best')
axes[0].set_ylabel('$a$')
axes[1].axhline(b_true, color='r')
axes[1].set_ylabel('$b$')
axes[2].axhline(np.log(V_true), color='r')
axes[2].set_ylabel(r'$\ln V$')
fig.tight_layout()
fig = corner.corner(chain[2000:], bins=32, labels=['$a$', '$b$', r'$\ln V$'],
truths=[a_true, b_true, np.log(V_true)])
good_samples = chain[2000::8]
good_samples.shape
low,med,hi = np.percentile(good_samples, [16, 50, 84], axis=0)
upper, lower = hi-med, med-low
disp_str = ""
for i,name in enumerate(['a', 'b', r'\ln V']):
fmt_str = '{name}={val:.2f}^{{+{plus:.2f}}}_{{-{minus:.2f}}}'
disp_str += fmt_str.format(name=name, val=med[i], plus=upper[i], minus=lower[i])
disp_str += r'\quad '
disp_str = "${}$".format(disp_str)
display.Latex(data=disp_str)
disp_str = ""
for i,name in zip([1,0], ['a', 'b']):
fmt_str = r'{name}={val:.2f} \pm {err:.2f}'
disp_str += fmt_str.format(name=name, val=best_pars[i], err=np.sqrt(pars_Cov[i,i]))
disp_str += r'\quad '
disp_str = "${}$".format(disp_str)
display.Latex(data=disp_str)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Intro and choice of objective function
Step2: Exercise 1
Step3: Execute the code below and verify that it executes
Step4: Now let's forget that we did that -- we know nothing about the model parameters, except that we think the true values of the data are well-described by a linear relation! We would like to measure the "best-fit" parameters of this model (for a straight line, the slope and intercept $(a,b)$) given the data above. In math, our model for the data $y$ is
Step5: Verify that you've correctly implemented your functions by executing the following cell
Step6: We can demonstrate that these are convex (over some domain) by computing the objective function values over a grid of parameter values (a grid in $a, b$)
Step7: There are minima in both cases near the true values of the parameters (good), but the functions clearly look different. Which one should we choose for finding the best parameters?
Step12: There we have it! The best-fit parameters and their errors for the straight-line fit, optimized with the only justifiable objective function, directly from a few linear algebra calculations.
Step13: Now we'll repeat what we did above to map out the value of the log-posterior over a 2D grid of parameter values. Because we used a flat prior, you'll notice it looks identical to the visualization of the weighted_squared_deviation -- only the likelihood has any slope to it!
Step14: Exercise 5
Step16: Now what do we do? The answer depends a bit on your intentions. If you'd like to propagate the posterior pdf (as in, pass it on to other scientists to use your results), what do you do if the posterior pdf isn't analytic? And what numbers do you put in your abstract? One option is to draw samples from your posterior pdf and compute summary statistics (e.g., median and quantiles) using the samples. That's the approach we're going to take.
Step17: Now we'll run the sampler! Let's start from some arbitrary position allowed by our prior.
Step18: Let's look at the chain returned, the parameter value positions throughout the sampler run
Step19: We can also look at the individual parameter traces, i.e. the 1D functions of parameter value vs. step number for each parameter separately
Step20: From these trace plots, we can see by eye that it takes the sampler about a few hundred steps to converge. When we look at the samples returned or when we compute our summary statistics, we don't want to include these parameter values! In addition, there is likely some correlation between nearby steps. We can attempt to remove some of the correlated steps by thinning the chain, i.e. by downsampling. We can do both simultaneously using Python indexing tricks. Certainly by step 2000 the chains look converged, so from there on we'll keep only every 8th step
Step21: We're left with 774 samples; we hope these are approximately uncorrelated, converged samples from the posterior pdf (there are other ways we can check, but these are out of scope for this workshop). Now you have to choose what summary statistics to report. You have some options, but a reasonable choice is to report the median, 16th, and 84th percentiles
Step22: Recall that the true values are
Step23: We've now done this problem the Bayesian way as well! Now, instead of drawing the "best-fit" line over the data, we can take a handful of samples and plot a line for each of the samples, as a way to visualize the uncertainty we have in the model parameters
Step24: Or, we can plot the samples using a corner plot to visualize the structure of the 2D and 1D (marginal) posteriors
Step25: Finally, the problem you came here for
Step26: Let's first naively fit the data assuming no intrinsic scatter using least-squares
Step27: The covariance matrix for the parameters is
Step30: We clearly get a biased result and yet very precise measurements of the parameters when we don't take into account the intrinsic scatter. What we need to do now is modify our model to include the scatter as a free parameter. Unfortunately, it enters the model non-linearly so there is no solution using linear algebra or least-squares. Instead, we just write a new likelihood function and optimize it numerically. One choice we'll make is to use the parameter $\ln{V}$ instead of $V$ for reasons I'll explain later. To implement the new model, we'll subclass our StraightLineModel class and define new likelihood and prior functions.
Step31: It looks like the maximum likelihood estimate is a little bit better, and we get a reasonable measurement of the intrinsic scatter, but none of this gives us a handle on the uncertainty. How do we quantify the uncertainty in the now 3 parameters? We'll just run MCMC.
Step32: Now we'll again compute the percentiles for the 1D, marginal distributions
Step33: Compare this to the diagonal elements of the covariance matrix we got from ignoring the intrinsic scatter and doing least-squares fitting
|
3,728
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow.compat.v2 as tf
import matplotlib.pyplot as plt
tf.enable_v2_behavior()
n = 20
lower, upper = -2, 2
modes_x, modes_y = zip(*[ # (x, y)
(-1., -20),
(0.0, -20),
(1., -20),
])
x = np.concatenate((modes_x, np.random.uniform(lower, upper, size=n-len(modes_x))))
y = np.random.uniform(-1000, -300, size=n)
y[:len(modes_x)] = modes_y
y /= np.max(np.abs(y))
plt.scatter(x, y)
plt.show()
# Mode
p=0.1
q=1/p
t = np.linspace(-2,2,200).reshape(-1, 1)
L = np.mean(np.power(np.abs(-np.power(np.abs(t - x[np.newaxis,:]), p) - y[np.newaxis,:]), q), axis=1)
plt.plot(t, L)
plt.show()
# Median
p=1.0
q=2.0
t = np.linspace(-2,2,200).reshape(-1, 1)
L = np.mean(np.power(np.abs(-np.power(np.abs(t - x[np.newaxis,:]), p) - y[np.newaxis,:]), q), axis=1)
#plt.ylim(0.5, 1.)
plt.plot(t, L)
plt.show()
# Mean
p=2.0
q=2.0
t = np.linspace(-2,2,200).reshape(-1, 1)
L = np.mean(np.power(np.abs(-np.power(np.abs(t - x[np.newaxis,:]), p) - y[np.newaxis,:]), q), axis=1)
#plt.ylim(0.5, 1.)
plt.plot(t, L)
plt.show()
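# A generalization sketch (my addition, not from the original notebook):
# the three cells above differ only in the exponents (p, q), so we can
# factor the loss into one helper and overlay the mode/median/mean cases.
def pq_loss(t, x, y, p, q):
    # mean over the data of |(-|t - x|^p) - y|^q, evaluated on a grid of t
    return np.mean(np.power(np.abs(-np.power(np.abs(t - x[np.newaxis, :]), p) - y[np.newaxis, :]), q), axis=1)

t = np.linspace(-2, 2, 200).reshape(-1, 1)
for p, q in [(0.1, 10.0), (1.0, 2.0), (2.0, 2.0)]:  # ~mode, median, mean
    plt.plot(t, pq_loss(t, x, y, p, q), label='p={}, q={}'.format(p, q))
plt.legend()
plt.show()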
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Goal
Step2: Generalize the loss above to
|
3,729
|
<ASSISTANT_TASK:>
Python Code:
from collections import namedtuple
from copy import deepcopy
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import cosine_similarity
from sqlalchemy import create_engine
from dotify.config import DevelopmentConfig as Config
from dotify.recommendation.implicit_mf.ratings_matrix import RatingsMatrix
from dotify.recommendation.implicit_mf.implicit_mf import ImplicitMF
from dotify.resources.countries import countries as countries_lookup
ENGINE = create_engine(Config.SQLALCHEMY_DATABASE_URI)
%matplotlib inline
plt.style.use('seaborn')
ratings_matrix = RatingsMatrix(n_top_songs=10000, eps=1e3)
ratings_matrix.R_ui.head()
sparsity = (ratings_matrix.R_ui == 0).mean().mean()
non_zero_entries_percent = 100*np.round(1 - sparsity, 4)
print('Our ratings matrix has {}% non-zero entries.'.format(non_zero_entries_percent))
print('Our ratings matrix contains {} countries and {} unique songs.'.format(*ratings_matrix.R_ui.shape))
plt.figure(figsize=(10, 6))
songs_rated_by_country = (ratings_matrix.R_ui > 0).sum(axis=1)
plt.hist(songs_rated_by_country)
plt.title('Distribution of # of Unique Songs Streamed per Country', fontsize=15)
plt.xlabel('Unique Songs Streamed', fontsize=12)
_ = plt.ylabel('# of Countries', fontsize=12)
print('The 15th percentile of songs rated by country is {}.'.format(
np.percentile(songs_rated_by_country, 15)
))
def split_ratings_matrix_into_training_and_validation(ratings_matrix, eligibility_mask, fraction_to_drop=.2):
training_matrix = ratings_matrix
validation_matrix = deepcopy(training_matrix)
validation_matrix.R_ui = pd.DataFrame(0., index=training_matrix.R_ui.index, columns=training_matrix.R_ui.columns)
for country_id, ratings in training_matrix.R_ui[eligibility_mask].iterrows():
rated_songs_mask = training_matrix.R_ui.ix[country_id] > 0
rated_songs = training_matrix.R_ui.ix[country_id][rated_songs_mask].index.tolist()
n_songs_to_drop = int( len(rated_songs)*fraction_to_drop )
songs_to_drop = set( random.sample(rated_songs, n_songs_to_drop) )
validation_matrix.R_ui.ix[country_id][songs_to_drop] = training_matrix.R_ui.ix[country_id][songs_to_drop]
training_matrix.R_ui.ix[country_id][songs_to_drop] = 0.
return training_matrix, validation_matrix
more_than_50_ratings_mask = songs_rated_by_country > 50
training_matrix, validation_matrix = split_ratings_matrix_into_training_and_validation(
ratings_matrix=ratings_matrix,
eligibility_mask=more_than_50_ratings_mask
)
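# Quick sanity check (my addition): a rating held out for validation must be
# zeroed in the training matrix, so the two matrices should never overlap.
overlap = ((training_matrix.R_ui > 0) & (validation_matrix.R_ui > 0)).sum().sum()
assert overlap == 0, 'training and validation ratings overlap!'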
class ExpectedPercentileRankingsEvaluator:
def __init__(self, training_matrix, validation_matrix, eligibility_mask, predicted_preferences):
self.training_matrix = training_matrix
self.validation_matrix = validation_matrix
self.eligibility_mask = eligibility_mask
self.predicted_preferences = predicted_preferences
self._expected_percentile_rankings_train = []
self._expected_percentile_rankings_validation = []
def run(self):
self._evaluate_train()
self._evaluate_validation()
def _evaluate_train(self):
self._expected_percentile_rankings_train = self._evaluate(matrix=self.training_matrix)
def _evaluate_validation(self):
self._expected_percentile_rankings_validation = self._evaluate(matrix=self.validation_matrix)
def _evaluate(self, matrix):
expected_percentile_rankings = []
for country_id, preferences in self.predicted_preferences[self.eligibility_mask].iterrows():
predictions = pd.DataFrame({
'predicted_preference': preferences.sort_values(ascending=False),
'rank': np.arange( len(preferences) ),
'percentile_rank': np.arange( len(preferences) ) / len(preferences)
})
ground_truth = matrix.R_ui.ix[country_id][ matrix.R_ui.ix[country_id] > 0 ]
numerator = (ground_truth * predictions['percentile_rank'][ground_truth.index]).sum()
denominator = ground_truth.sum()
expected_percentile_rankings.append( numerator / denominator )
return expected_percentile_rankings
@property
def mean_expected_percentile_rankings_train(self):
return np.mean(self._expected_percentile_rankings_train)
@property
def mean_expected_percentile_rankings_validation(self):
return np.mean(self._expected_percentile_rankings_validation)
F = 30
grid_search_results = {}
result = namedtuple('Result', 'alpha lmbda')
alpha_values = [1e-1, 1e0, 1e1, 1e2]
lmbda_values = [1e-1, 1e0, 1e1, 1e2]
for alpha in alpha_values:
for lmbda in lmbda_values:
implicit_mf = ImplicitMF(ratings_matrix=training_matrix, f=F, alpha=alpha, lmbda=lmbda)
implicit_mf.run()
predicted_preferences = implicit_mf.country_vectors.vectors.dot( implicit_mf.song_vectors.vectors.T )
evaluator = ExpectedPercentileRankingsEvaluator(
training_matrix=training_matrix,
validation_matrix=validation_matrix,
eligibility_mask=more_than_50_ratings_mask,
predicted_preferences=predicted_preferences
)
evaluator.run()
grid_search_results[result(alpha=alpha, lmbda=lmbda)] = {
'train': evaluator.mean_expected_percentile_rankings_train,
'validation': evaluator.mean_expected_percentile_rankings_validation
}
print(grid_search_results)
grid_search_items = [(params, results) for params, results in grid_search_results.items()]
grid_search_items.sort(key=lambda tup: tup[0].alpha)
params, results = zip(*grid_search_items)
validation_results = np.array([result['validation'] for result in results]).reshape(4, 4)
plt.figure(figsize=(15, 10))
plt.xticks([])
plt.yticks([])
plt.pcolormesh(-validation_results, cmap='Reds')
plt.colorbar()
for alpha_index, alpha in enumerate(alpha_values):
for lbmda_index, lmbda in enumerate(lmbda_values):
plt.text(
x = lbmda_index + 0.5,
y = alpha_index + 0.5,
s = r'$\alpha$ = {}'.format(alpha) + '\n' + r'$\lambda$ = {}'.format(lmbda),
ha='center',
va='center',
size=15,
color='w'
)
plt.title('Grid Search Results', fontsize=15)
plt.xlabel(r'$\lambda$ Values', fontsize=14)
_ = plt.ylabel(r'$\alpha$ Values', fontsize=14)
best_params = min(grid_search_results, key=lambda key: grid_search_results.get(key)['validation'])
print('The best parameters were found to be: {}.'.format(best_params))
ratings_matrix = RatingsMatrix(n_top_songs=1000000, eps=1e3)
print('Our ratings matrix contains {} countries and {} unique songs.'.format(*ratings_matrix.R_ui.shape))
implicit_mf = ImplicitMF(ratings_matrix=ratings_matrix, f=F, alpha=best_params.alpha, lmbda=best_params.lmbda)
implicit_mf.run()
country_vectors_df = implicit_mf.country_vectors.vectors.copy()
country_id_to_name = {countries_lookup[name]['id']: name for name in countries_lookup}
country_ids = country_vectors_df.index
country_names = pd.Index([country_id_to_name[c_id] for c_id in country_ids], name='country_name')
country_vectors_df.index = country_names
country_vectors_df.head()
sns.set(style="white")
def plot_cosine_similarities(country_vectors_df):
similarities_df = pd.DataFrame(
data=cosine_similarity(country_vectors_df),
index=country_vectors_df.index,
columns=country_vectors_df.index
)
lower_triangle_mask = np.zeros_like(similarities_df, dtype=np.bool)
lower_triangle_mask[np.triu_indices_from(lower_triangle_mask)] = True
f, ax = plt.subplots(figsize=(21, 21))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(
similarities_df,
mask=lower_triangle_mask,
cmap=cmap,
vmax=.5,
square=True,
xticklabels=True,
yticklabels=True,
linewidths=1,
cbar_kws={"shrink": .5},
ax=ax,
)
ax.set_title('Cosine Similarity Matrix', fontsize=20)
plot_cosine_similarities(country_vectors_df)
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, random_state=12345)
country_embeddings = pd.DataFrame(
data=tsne.fit_transform(country_vectors_df),
index=country_vectors_df.index,
columns=['dim_1', 'dim_2']
)
def plot_tsne_embeddings(country_embeddings):
plt.figure(figsize=(15,15))
for country_name, country_embedding in country_embeddings.iterrows():
dim_1, dim_2 = country_embedding
plt.scatter(dim_1, dim_2)
plt.annotate(country_name, xy=(dim_1, dim_2), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.title('Two-Dimensional TSNE Embeddings of Latent Country Vectors', fontsize=16)
plt.xlabel('Dimension 1', fontsize=12)
plt.ylabel('Dimension 2', fontsize=12)
plt.show()
plot_tsne_embeddings(country_embeddings)
SONG_METADATA_QUERY = """
SELECT
    songs.title as song_title,
    songs.artist as song_artist,
    songs.id as song_id
FROM songs
"""
song_metadata_df = pd.read_sql(SONG_METADATA_QUERY, ENGINE, index_col=['song_id'])
song_vectors_df = song_metadata_df.join(implicit_mf.song_vectors.vectors, how='inner')\
.set_index(['song_title', 'song_artist'])
song_vectors_df_norm = song_vectors_df.apply(lambda vec: vec / np.linalg.norm(vec), axis=1)
country_vectors_df_norm = country_vectors_df.apply(lambda vec: vec / np.linalg.norm(vec), axis=1)
assert song_vectors_df_norm.apply(np.linalg.norm, axis=1).mean() == 1.
assert country_vectors_df_norm.apply(np.linalg.norm, axis=1).mean() == 1.
pd.set_option('display.max_colwidth', 100)
country_vec = country_vectors_df_norm.ix['United States']
song_vectors_df_norm.dot(country_vec).sort_values(ascending=False).reset_index().head(10)
country_vec = country_vectors_df_norm.ix['Colombia']
song_vectors_df_norm.dot(country_vec).sort_values(ascending=False).reset_index().head(10)
country_vec = country_vectors_df_norm.ix['Turkey']
song_vectors_df_norm.dot(country_vec).sort_values(ascending=False).reset_index().head(10)
country_vec = country_vectors_df_norm.ix['Germany']
song_vectors_df_norm.dot(country_vec).sort_values(ascending=False).reset_index().head(10)
country_vec = country_vectors_df_norm.ix['Taiwan']
song_vectors_df_norm.dot(country_vec).sort_values(ascending=False).reset_index().head(10)
song_vectors_df.apply(np.linalg.norm, axis=1).sort_values(ascending=False).reset_index().head(10)
country_vectors_df.apply(np.linalg.norm, axis=1).sort_values(ascending=False).reset_index().head(10)
country_vec = country_vectors_df.ix['Taiwan']
song_vectors_df.dot(country_vec).sort_values(ascending=False).reset_index().head(10)
country_vec = country_vectors_df.ix['Colombia']
song_vectors_df.dot(country_vec).sort_values(ascending=False).reset_index().head(10)
C = .2
country_vec = country_vectors_df_norm.ix['Taiwan']
(C * song_vectors_df).dot(country_vec).sort_values(ascending=False).reset_index().head(10)
from itertools import product
from keras import backend as K
from keras.layers import Input, Embedding, Flatten, Lambda, Dense, BatchNormalization, Dropout, LSTM
from keras.layers.merge import add, dot, concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.regularizers import l2
from keras.utils import plot_model
songs_rated_by_country = (ratings_matrix.R_ui > 0).sum(axis=1)
plt.hist(songs_rated_by_country)
plt.title('Distribution of # of Unique Songs Streamed per Country', fontsize=15)
plt.xlabel('Unique Songs Streamed', fontsize=12)
_ = plt.ylabel('# of Countries', fontsize=12)
print('The 15th percentile of songs rated by country is {}.'.format(
np.percentile(songs_rated_by_country, 15)
))
more_than_200_ratings_mask = songs_rated_by_country > 200
training_matrix, validation_matrix = split_ratings_matrix_into_training_and_validation(
ratings_matrix=ratings_matrix,
eligibility_mask=more_than_200_ratings_mask
)
song_metadata_df = song_metadata_df.ix[ratings_matrix.R_ui.columns]
song_metadata_df.head()
country_id_to_index = {c_id: index for index, c_id in enumerate(ratings_matrix.R_ui.index)}
song_id_to_index = {s_id: index for index, s_id in enumerate(ratings_matrix.R_ui.columns)}
song_artist_to_index = {artist: index for index, artist in enumerate(song_metadata_df['song_artist'].unique())}
song_metadata_df['song_index'] = song_metadata_df.index.map(song_id_to_index.get)
song_metadata_df['song_artist_index'] = song_metadata_df['song_artist'].map(song_artist_to_index.get).astype(int)
song_metadata_df.tail()
NUM_WORDS = 5000
tokenizer = Tokenizer(num_words=NUM_WORDS, lower=True, split=" ")
tokenizer.fit_on_texts(song_metadata_df['song_title'])
song_metadata_df['title_sequence'] = tokenizer.texts_to_sequences(song_metadata_df['song_title'])
input_tuples = list( product(ratings_matrix.R_ui.index, ratings_matrix.R_ui.columns) )
country_ids, song_ids = zip(*input_tuples)
ratings_df = pd.DataFrame({
'country_id': country_ids,
'song_id': song_ids,
'train_rating': training_matrix.R_ui.values.flatten(),
'validation_rating': validation_matrix.R_ui.values.flatten(),
})
ratings_df = ratings_df.join(song_metadata_df, on='song_id', how='inner').sort_index()
ratings_df['country_index'] = ratings_df['country_id'].map(country_id_to_index.get)
n_countries = ratings_df['country_index'].nunique()
n_songs = ratings_df['song_index'].nunique()
n_artists = ratings_df['song_artist_index'].nunique()
alpha = best_params.alpha
lmbda = best_params.lmbda
F = 30
LEARNING_RATE = .001
def implicit_cf_loss(y_true, y_pred, alpha=alpha):
    # cast the binary preference indicator to float so the arithmetic below
    # works across Keras backends (boolean tensors can't be subtracted in TF)
    P = K.cast(y_true > 0, 'float32')
    C = 1 + alpha*y_true  # confidence grows with the raw rating
    return K.mean( C*(P - y_pred)**2 )
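# Tiny numpy sanity check (my addition) of the loss semantics from
# Hu, Koren & Volinsky: P is the binary preference, C the confidence,
# so a miss on a heavily-streamed item is penalized the most.
_y_true = np.array([0., 2., 10.])
_P = (_y_true > 0).astype(float)   # preferences: [0, 1, 1]
_C = 1 + alpha*_y_true             # confidences: [1, 1+2a, 1+10a]
print(np.mean(_C * (_P - np.array([0.1, 0.9, 0.2]))**2))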
def generate_predictions(model, inputs, ratings_df=ratings_df):
predictions = model.predict(x=inputs)
return pd.DataFrame({
'prediction': predictions.flatten(),
'country_id': ratings_df['country_id'],
'song_id': ratings_df['song_id']
}).pivot('country_id', 'song_id', 'prediction')
def evaluate_predictions(predictions, eligibility_mask=more_than_200_ratings_mask,
training_matrix=training_matrix, validation_matrix=validation_matrix):
evaluator = ExpectedPercentileRankingsEvaluator(
training_matrix=training_matrix,
validation_matrix=validation_matrix,
eligibility_mask=eligibility_mask,
predicted_preferences=predictions
)
evaluator.run()
print('Train: {}'.format(evaluator.mean_expected_percentile_rankings_train))
print('Validation: {}'.format(evaluator.mean_expected_percentile_rankings_validation))
def visualize_predictions(predictions):
plt.figure(figsize=(10, 6))
plt.hist(predictions.values.flatten(), bins=25)
plt.title('Predicted Preferences Histogram', fontsize=15)
plt.xlabel('Predicted Prefence', fontsize=12)
_ = plt.ylabel('Count', fontsize=12)
country_input = Input(shape=(1,), dtype='int64', name='country')
song_input = Input(shape=(1,), dtype='int64', name='song')
country_embedding = Embedding(input_dim=n_countries, output_dim=F, embeddings_regularizer=l2(lmbda))(country_input)
song_embedding = Embedding(input_dim=n_songs, output_dim=F, embeddings_regularizer=l2(lmbda))(song_input)
predicted_preference = dot(inputs=[country_embedding, song_embedding], axes=2)
predicted_preference = Flatten()(predicted_preference)
model = Model(inputs=[country_input, song_input], outputs=predicted_preference)
model.compile(loss=implicit_cf_loss, optimizer=Adam(lr=LEARNING_RATE))
plot_model(model, to_file='figures/network_1.png')
inputs = [ratings_df['country_index'], ratings_df['song_index']]
network_1 = model.fit(
x=inputs,
y=ratings_df['train_rating'],
batch_size=1024,
epochs=5,
validation_data=(inputs, ratings_df['validation_rating'])
)
predictions = generate_predictions(model=model, inputs=inputs)
evaluate_predictions(predictions)
grid_search_results[best_params]
visualize_predictions(predictions)
country_bias = Embedding(input_dim=n_countries, output_dim=1, name='country_bias', input_length=1)(country_input)
song_bias = Embedding(input_dim=n_songs, output_dim=1, name='song_bias', input_length=1)(song_input)
predicted_preference = add(inputs=[predicted_preference, song_bias, country_bias])
predicted_preference = Flatten()(predicted_preference)
model = Model(inputs=[country_input, song_input], outputs=predicted_preference)
model.compile(loss=implicit_cf_loss, optimizer=Adam(lr=LEARNING_RATE))
plot_model(model, to_file='figures/network_2.png')
inputs = [ratings_df['country_index'], ratings_df['song_index']]
network_2 = model.fit(
x=inputs,
y=ratings_df['train_rating'],
batch_size=1024,
epochs=5,
validation_data=(inputs, ratings_df['validation_rating'])
)
predictions = generate_predictions(model=model, inputs=inputs)
evaluate_predictions(predictions)
_ = pd.DataFrame({
'predictions': predictions.values.flatten(),
'ratings': ratings_df['train_rating'].values.flatten()
}).hist(figsize=(16, 6))
country_input = Input(shape=(1,), dtype='int64', name='country')
song_input = Input(shape=(1,), dtype='int64', name='song')
country_embedding = Embedding(input_dim=n_countries, output_dim=F, embeddings_regularizer=l2(lmbda))(country_input)
song_embedding = Embedding(input_dim=n_songs, output_dim=F, embeddings_regularizer=l2(lmbda))(song_input)
country_bias = Embedding(input_dim=n_countries, output_dim=1, name='country_bias', input_length=1)(country_input)
song_bias = Embedding(input_dim=n_songs, output_dim=1, name='song_bias', input_length=1)(song_input)
concatenation = concatenate([country_embedding, song_embedding])
dense_layer = Dense(activation='relu', units=10)(concatenation)
dropout = Dropout(.5)(dense_layer)
batch_norm = BatchNormalization()(dropout)
dense_layer = Dense(activation='relu', units=10)(batch_norm)
batch_norm = BatchNormalization()(dense_layer)
dense_layer = Dense(activation='relu', units=10)(batch_norm)
batch_norm = BatchNormalization()(dense_layer)
predicted_preference = Dense(units=1, name='predicted_preference')(batch_norm)
predicted_preference = Flatten()(predicted_preference)
predicted_preference = add(inputs=[predicted_preference, country_bias, song_bias])
predicted_preference = Flatten()(predicted_preference)
model = Model(inputs=[country_input, song_input], outputs=predicted_preference)
model.compile(loss=implicit_cf_loss, optimizer=Adam(lr=LEARNING_RATE))
plot_model(model, to_file='figures/network_3.png')
inputs = [ratings_df['country_index'], ratings_df['song_index']]
network_3 = model.fit(
x=inputs,
y=ratings_df['train_rating'],
batch_size=1024,
epochs=5,
validation_data=(inputs, ratings_df['validation_rating'])
)
predictions = generate_predictions(model=model, inputs=inputs)
evaluate_predictions(predictions)
MAX_SEQUENCE_LENGTH = ratings_df['title_sequence'].map(len).max()
padded_title_sequences = pad_sequences(sequences=ratings_df['title_sequence'], maxlen=MAX_SEQUENCE_LENGTH)
country_input = Input(shape=(1,), dtype='int64', name='country')
song_input = Input(shape=(1,), dtype='int64', name='song')
title_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32', name='song_title_sequence')
country_embedding = Embedding(input_dim=n_countries, output_dim=F, embeddings_regularizer=l2(lmbda))(country_input)
song_embedding = Embedding(input_dim=n_songs, output_dim=F, embeddings_regularizer=l2(lmbda))(song_input)
title_embedding = Embedding(output_dim=F, input_dim=NUM_WORDS, input_length=MAX_SEQUENCE_LENGTH)(title_input)
country_bias = Embedding(input_dim=n_countries, output_dim=1, name='country_bias', input_length=1)(country_input)
song_bias = Embedding(input_dim=n_songs, output_dim=1, name='song_bias', input_length=1)(song_input)
predicted_preference = dot(inputs=[country_embedding, song_embedding], axes=2)
predicted_preference = add(inputs=[predicted_preference, country_bias, song_bias])
predicted_preference = Flatten()(predicted_preference)
title_lstm = LSTM(F)(title_embedding)
dense_title_lstm = Dense(units=1, activation='relu')(title_lstm)
predicted_preference_merge = concatenate(inputs=[predicted_preference, dense_title_lstm])
final_output = Dense(activation='linear', units=1)(predicted_preference_merge)
model = Model(inputs=[country_input, song_input, title_input], outputs=final_output)
model.compile(loss=implicit_cf_loss, optimizer=Adam(lr=LEARNING_RATE))
plot_model(model, to_file='figures/network_4.png')
inputs = [ratings_df['country_index'], ratings_df['song_index'], padded_title_sequences]
network_4 = model.fit(
x=inputs,
y=ratings_df['train_rating'],
batch_size=1024,
epochs=5,
validation_data=(inputs, ratings_df['validation_rating'])
)
predictions = generate_predictions(model=model, inputs=inputs)
evaluate_predictions(predictions)
_ = pd.DataFrame({
'predictions': predictions.values.flatten(),
'ratings': ratings_df['train_rating'].values.flatten()
}).hist(figsize=(16, 6))
artist_input = Input(shape=(1,), dtype='int64', name='artist')
artist_embedding = Embedding(input_dim=n_artists, output_dim=F, embeddings_regularizer=l2(lmbda))(artist_input)
dense_artist_embedding = Dense(units=1, activation='relu')(artist_embedding)
dense_artist_embedding = Flatten()(dense_artist_embedding)
predicted_preference_merge = concatenate(inputs=[predicted_preference, dense_title_lstm, dense_artist_embedding])
final_output = Dense(activation='linear', units=1)(predicted_preference_merge)
model = Model(inputs=[country_input, song_input, title_input, artist_input], outputs=final_output)
model.compile(loss=implicit_cf_loss, optimizer=Adam(lr=LEARNING_RATE))
plot_model(model, to_file='figures/network_5.png')
inputs = [
ratings_df['country_index'],
ratings_df['song_index'],
padded_title_sequences,
ratings_df['song_artist_index']
]
model.fit(
x=inputs,
y=ratings_df['train_rating'],
batch_size=1024,
epochs=5,
validation_data=(inputs, ratings_df['validation_rating'])
)
predictions = generate_predictions(model=model, inputs=inputs)
evaluate_predictions(predictions)
country_input = Input(shape=(1,), dtype='int64', name='country')
song_input = Input(shape=(1,), dtype='int64', name='song')
country_embedding = Embedding(input_dim=n_countries, output_dim=F, embeddings_regularizer=l2(lmbda))(country_input)
song_embedding = Embedding(input_dim=n_songs, output_dim=F, embeddings_regularizer=l2(lmbda))(song_input)
country_bias = Embedding(input_dim=n_countries, output_dim=1, name='country_bias', input_length=1)(country_input)
song_bias = Embedding(input_dim=n_songs, output_dim=1, name='song_bias', input_length=1)(song_input)
predicted_preference = dot(inputs=[country_embedding, song_embedding], axes=2)
predicted_preference = Flatten()(predicted_preference)
predicted_preference = add(inputs=[predicted_preference, song_bias, country_bias])
predicted_preference = Flatten()(predicted_preference)
model = Model(inputs=[country_input, song_input], outputs=predicted_preference)
model.compile(loss=implicit_cf_loss, optimizer=Adam(lr=LEARNING_RATE))
inputs = [ratings_df['country_index'], ratings_df['song_index']]
network_2 = model.fit(
x=inputs,
y=ratings_df['train_rating'],
batch_size=1024,
epochs=5,
validation_data=(inputs, ratings_df['validation_rating'])
)
predictions = generate_predictions(model=model, inputs=inputs)
evaluate_predictions(predictions)
predictions_unstack = predictions.unstack()
predictions_unstack = pd.DataFrame({
'song_title': song_metadata_df.ix[predictions_unstack.index.get_level_values('song_id')]['song_title'],
'song_artist': song_metadata_df.ix[predictions_unstack.index.get_level_values('song_id')]['song_artist'],
'country': predictions_unstack.index.get_level_values('country_id').map(country_id_to_name.get),
'rating': predictions_unstack.values
})
predictions_unstack[predictions_unstack['country'] == 'Colombia'].sort_values(by='rating', ascending=False).head(10)
predictions_unstack[predictions_unstack['country'] == 'United States'].sort_values(by='rating', ascending=False).head(10)
predictions_unstack[predictions_unstack['country'] == 'Turkey'].sort_values(by='rating', ascending=False).head(10)
predictions_unstack[predictions_unstack['country'] == 'Taiwan'].sort_values(by='rating', ascending=False).head(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Abstract
Step2: Next, let's get an idea of the sparsity of our data and how many songs each country has streamed.
Step3: Construct training, validation sets
Step4: Let's choose 50 songs as our cutoff, and move 20% of the songs in qualifying rows to a validation set.
Step5: Evaluation
Step6: Grid search
Step7: Let's visualize the results for clarity. I plot the opposite of the validation score such that the parameters corresponding to the darkest square are most favorable.
Step8: Train a final model
Step9: Visualize
Step10: Then, we'll plot the cosine similarities.
Step11: I first turn to my favorite country - Colombia - to inspect its similarity with other Latin American countries. The numbers are high for
Step13: This is not particularly useful. Furthermore, and perhaps most importantly, the plot varies immensely when changing the random seed. This is a worthwhile step nonetheless.
Step14: United States
Step15: Colombia
Step16: Turkey
Step17: Germany
Step18: Taiwan
Step19: These look great.
Step20: To reference Taiwan and Colombia, let's inspect predictions using unnormalized vectors.
Step21: Respectively, neither set of recommendations contains a song with Chinese or Spanish lettering. Additionally, it seems like the world is fond of Ed Sheeran. It is clear that the global popularity of songs dominates our recommendations when computing the dot product with unnormalized vectors.
Step22: About the same. Nevertheless, the length of our vectors can likely offer some value when balancing local recommendations with globally popular songs. I leave this to the reader to explore further for now.
Step23: Let's set our eligibility cutoff at 200 songs and create training and validation matrices.
Step24: Arrange our data
Step25: Tokenize song title
Step26: Convert ratings to long format
Step27: Define network parameters
Step28: Define a loss function
Step29: Define functions to generate, evaluate and visualize predictions
Step30: <a id="network_1"></a>
Step31:
Step32: For reference, let's recall the results we computed when validating our model with best_params.
Step33: Additionally, a random model should return an expected percentile ranking of ~50%, as noted in Hu, Koren and Volinksy. So, this is really bad. Let's visualize the distribution of predictions before moving on to another model.
Step34: <a id="network_2"></a>
Step35:
Step36: This looks a lot better. Now let's visualize our ground-truth and predictions side-by-side.
Step37: This model approximates the training distribution much better than the others. Key to this task is distinguishing between items with 0 ratings and those with high ratings. Remember, we first down-scaled our ratings with the following transformation to shrink the domain
Step38:
Step39: These results are roughly identical to the previous. Choosing between the models, we'd certainly go with the simpler of the two.
Step40:
Step41: We seem to get a pinch of lift from the song title, though we're still underfitting our training set. Perhaps we could make our model bigger, or run it for more epochs.
Step42: Finally, let's add the artist.
Step43:
Step44: This seems a bit worse than the last, though similar all the same. I'm a bit surprised we didn't get more lift from artists. Nevertheless, we're still underfitting. Bigger model, more epochs, etc.
Step45: Colombia
Step46: United States
Step47: Turkey
Step48: Taiwan
|
3,730
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
from statsmodels import regression
from scipy import poly1d
x = np.arange(10)
y = 2*np.random.randn(10) + x**2
xs = np.linspace(-0.25, 9.25, 200)
lin = np.polyfit(x, y, 1)
quad = np.polyfit(x, y, 2)
many = np.polyfit(x, y, 9)
plt.scatter(x, y)
plt.plot(xs, poly1d(lin)(xs))
plt.plot(xs, poly1d(quad)(xs))
plt.plot(xs, poly1d(many)(xs))
plt.ylabel('Y')
plt.xlabel('X')
plt.legend(['Underfit', 'Good fit', 'Overfit']);
# Load one year's worth of pricing data for five different assets
start = '2013-01-01'
end = '2014-01-01'
x1 = get_pricing('PEP', fields='price', start_date=start, end_date=end)
x2 = get_pricing('MCD', fields='price', start_date=start, end_date=end)
x3 = get_pricing('ATHN', fields='price', start_date=start, end_date=end)
x4 = get_pricing('DOW', fields='price', start_date=start, end_date=end)
y = get_pricing('PG', fields='price', start_date=start, end_date=end)
# Build a linear model using only x1 to explain y
slr = regression.linear_model.OLS(y, sm.add_constant(x1)).fit()
slr_prediction = slr.params[0] + slr.params[1]*x1
# Run multiple linear regression using x1, x2, x3, x4 to explain y
mlr = regression.linear_model.OLS(y, sm.add_constant(np.column_stack((x1,x2,x3,x4)))).fit()
mlr_prediction = mlr.params[0] + mlr.params[1]*x1 + mlr.params[2]*x2 + mlr.params[3]*x3 + mlr.params[4]*x4
# Compute adjusted R-squared for the two different models
print 'SLR R-squared:', slr.rsquared_adj
print 'SLR p-value:', slr.f_pvalue
print 'MLR R-squared:', mlr.rsquared_adj
print 'MLR p-value:', mlr.f_pvalue
# Plot y along with the two different predictions
y.plot()
slr_prediction.plot()
mlr_prediction.plot()
plt.ylabel('Price')
plt.xlabel('Date')
plt.legend(['PG', 'SLR', 'MLR']);
# Load the next of pricing data
start = '2014-01-01'
end = '2015-01-01'
x1 = get_pricing('PEP', fields='price', start_date=start, end_date=end)
x2 = get_pricing('MCD', fields='price', start_date=start, end_date=end)
x3 = get_pricing('ATHN', fields='price', start_date=start, end_date=end)
x4 = get_pricing('DOW', fields='price', start_date=start, end_date=end)
y = get_pricing('PG', fields='price', start_date=start, end_date=end)
# Extend our model from before to the new time period
slr_prediction2 = slr.params[0] + slr.params[1]*x1
mlr_prediction2 = mlr.params[0] + mlr.params[1]*x1 + mlr.params[2]*x2 + mlr.params[3]*x3 + mlr.params[4]*x4
# Manually compute adjusted R-squared over the new time period
adj = float(len(y) - 1)/(len(y) - 5) # Compute adjustment factor
SST = sum((y - np.mean(y))**2)
SSRs = sum((slr_prediction2 - y)**2)
print 'SLR R-squared:', 1 - adj*SSRs/SST
SSRm = sum((mlr_prediction2 - y)**2)
print 'MLR R-squared:', 1 - adj*SSRm/SST
# Plot y along with the two different predictions
y.plot()
slr_prediction2.plot()
mlr_prediction2.plot()
plt.ylabel('Price')
plt.xlabel('Date')
plt.legend(['PG', 'SLR', 'MLR']);
# Load the pricing data for a stock
start = '2011-01-01'
end = '2013-01-01'
pricing = get_pricing('MCD', fields='price', start_date=start, end_date=end)
# Compute rolling averages for various window lengths
mu_30d = pd.rolling_mean(pricing, window=30)
mu_60d = pd.rolling_mean(pricing, window=60)
mu_100d = pd.rolling_mean(pricing, window=100)
# Plot asset pricing data with rolling means from the 100th day, when all the means become available
plt.plot(pricing[100:], label='Asset')
plt.plot(mu_30d[100:], label='30d MA')
plt.plot(mu_60d[100:], label='60d MA')
plt.plot(mu_100d[100:], label='100d MA')
plt.xlabel('Day')
plt.ylabel('Price')
plt.legend();
# Trade using a simple mean-reversion strategy
def trade(stock, length):
# If window length is 0, algorithm doesn't make sense, so exit
if length == 0:
return 0
# Compute rolling mean and rolling standard deviation
mu = pd.rolling_mean(stock, window=length)
std = pd.rolling_std(stock, window=length)
# Compute the z-scores for each day using the historical data up to that day
zscores = (stock - mu)/std
# Simulate trading
# Start with no money and no positions
money = 0
count = 0
for i in range(len(stock)):
# Sell short if the z-score is > 1
if zscores[i] > 1:
money += stock[i]
count -= 1
# Buy long if the z-score is < -1
elif zscores[i] < -1:
money -= stock[i]
count += 1
# Clear positions if the z-score between -.5 and .5
elif abs(zscores[i]) < 0.5:
money += count*stock[i]
count = 0
return money
# Find the window length 0-254 that gives the highest returns using this strategy
length_scores = [trade(pricing, l) for l in range(255)]
best_length = np.argmax(length_scores)
print 'Best window length:', best_length
# Get pricing data for a different timeframe
start2 = '2013-01-01'
end2 = '2015-01-01'
pricing2 = get_pricing('MCD', fields='price', start_date=start2, end_date=end2)
# Find the returns during this period using what we think is the best window length
length_scores2 = [trade(pricing2, l) for l in range(255)]
print best_length, 'day window:', length_scores2[best_length]
# Find the best window length based on this dataset, and the returns using this window length
best_length2 = np.argmax(length_scores2)
print best_length2, 'day window:', length_scores2[best_length2]
plt.plot(length_scores)
plt.plot(length_scores2)
plt.xlabel('Window length')
plt.ylabel('Score')
plt.legend(['2011-2013', '2013-2015']);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: When working with real data, there is unlikely to ever be a situation where a ninth-degree polynomial is appropriate
Step2: However, when we use the same estimated parameters to model a different time period, we find that the single-variable model fits worse, while the multiple-variable model is entirely useless. It seems that the relationships we found are not consistent and are particular to the original sample period.
Step3: If we wanted, we could scan our universe for variables that were correlated with the dependent variable, and construct an extremely overfitted model. However, in most cases the correlation will be spurious, and the relationship will not continue into the future.
Step4: If we pick the length based on which seems best - say, on how well our model or algorithm performs - we are overfitting. Below we have a simple trading algorithm which bets on the stock price reverting to the rolling mean (for more details, check out the mean reversion notebook). We use the performance of this algorithm to score window lengths and find the best one. However, when we consider a different timeframe, this window length is far from optimal. This is because our original choice was overfitted to the sample data.
Step5: Clearly fitting to our sample data doesn't always give good results in the future. Just for fun, let's plot the length scores computed from the two different timeframes
|
3,731
|
<ASSISTANT_TASK:>
Python Code:
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'] )
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
print(type(test_set))
classifier.fit_generator(training_set,
steps_per_epoch = 8000,
epochs = 25,
validation_data = test_set,
validation_steps = 2000)
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] == 1:
prediction = 'dog'
else:
prediction = 'cat'
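# Small wrapper (my addition) around the single-image pipeline above, so
# further images can be classified the same way; the path argument is
# illustrative, not a file shipped with the dataset.
def predict_image(path):
    img = image.load_img(path, target_size = (64, 64))
    arr = np.expand_dims(image.img_to_array(img), axis = 0)
    return 'dog' if classifier.predict(arr)[0][0] == 1 else 'cat'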
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialising the CNN
Step2: Step 1 - Convolution
Step3: Step 2 - Pooling
Step4: Adding a second convolutional layer
Step5: Step 3 - Flattening
Step6: Step 4 - Full connection
Step7: Compiling the CNN
Step8: ----------------------Part 2 - Fitting the CNN to the images-------------------------
Step9: ---------------------------Part 3 - Making new predictions---------------------------
|
3,732
|
<ASSISTANT_TASK:>
Python Code:
rst(O.to_iterable)
s = marble_stream("a--b-c|")
l, ts = [], time.time()
def on_next(listed):
print('got', listed, time.time()-ts)
for i in (1, 2):
d = s.subscribe(on_next)
# second run: only one value, the list.
s = s.to_list()
# both are started around same time -> check time deltas
rst(O.to_blocking)
ts = time.time()
s = O.interval(200).take(3)
sb = s.to_blocking()
# this is instant:
assert time.time() - ts < 0.2
print('''In some implementations of ReactiveX, there is also an operator that converts an Observable into a “Blocking” Observable. A Blocking Observable extends the ordinary Observable by providing a set of methods, operating on the items emitted by the Observable, that block. Some of the To operators are in this Blocking Observable set of extended operations.''')
# -> diffing dir(s) with dir(sb) we get:
# __iter__
# for_each
# observable
rst(sb.__iter__)
for i in (1, 2):
# not interleaved results:
for it in sb:
log(it)
rst(sb.for_each)
sb.for_each(log)
header(".observable -> getting async again")
# interleaved again:
d = subs(sb.observable, name='observer 1')
d = subs(sb.observable, name='observer 2')
rst(O.to_dict)
d = subs(O.from_('abc').to_dict(key_mapper=lambda x: x, element_mapper=lambda a: '%s%s' % (a, a)))
rst(O.to_future)
def emit(obs):
for ev in 'first', 'second':
sleep(.5)
log('emitting', ev)
obs.on_next(ev)
# vital for the future to get done:
obs.on_completed()
try:
# required for py2 (backport of guidos' tulip stuffs, now asyncio)
# caution: people say this is not production ready and will never be.
import trollius
f = rx.Observable.create(emit).to_future(trollius.Future)
# this is async, not a busy loop
log('future.result():', f.result())
except: # notebook should always run all cells
print ('skipping this; pip install trollius required')
rst(O.from_marbles)
d = subs(rx.Observable.from_string("1-(42)-3-|").to_blocking())
rst(O.to_set)
d = subs(O.from_("abcabc").to_set())
rst(O.subscribe_on)
# start simple:
header('Switching Schedulers')
s = O.just(42, reactivex.scheduler.ImmediateScheduler())
d = subs(s.subscribe_on(reactivex.scheduler.TimeoutScheduler()), name='SimpleSubs')
sleep(0.1)
header('Custom Subscription Side Effects')
from reactivex.scheduler.newthreadscheduler import NewThreadScheduler
from reactivex.scheduler.eventloopscheduler import EventLoopScheduler
class MySched(NewThreadScheduler):
'''For adding side effects at subscription and unsubscription time'''
def schedule(self, action, state=None):
log('new scheduling task', action)
scheduler = EventLoopScheduler(
thread_factory=self.thread_factory,
exit_if_empty=True)
return scheduler.schedule(action, state)
s = O.interval(200).take(2)
s = s.subscribe_on(MySched())
d = subs(s, name="subs1")
d = subs(s, name="subs2")
rst(O.observe_on)
from reactivex.scheduler.newthreadscheduler import NewThreadScheduler
header('defining a custom thread factory for a custom scheduler')
def my_thread_factory(target, args=None):
'just to show that also here we can customize'
t = threading.Thread(target=target, args=args or [])
t.setDaemon(True)
print ('\ncreated %s\n' % t.getName())
return t
class MySched:
def __init__(self):
self.rx_sched = NewThreadScheduler(my_thread_factory)
def __getattr__(self, a):
'called whenever the observe_on scheduler is on duty'
log('RX called', a, 'on mysched\n')
return getattr(self.rx_sched, a)
mysched = MySched()
s = O.interval(200).take(3) #.delay(100, mysched)
d = subs(s.observe_on(mysched))
sleep(2)
print ('all threads after finish:') # all cleaned up
print (' '.join([t.name for t in threading.enumerate()]))
rst(O.do_action)
def say(v=None):
if v:
log('NI!', v)
else:
log('EOF')
d = subs(O.range(10, 10).take(2).tap(say, on_completed=say))
rst(O.finally_action)
d = subs(O.on_error('err').take(2).finally_action(say))
rst(O.throw)
d = subs(O.range(1, 3).concat(O.on_error("ups")))
rst(O.timeout)
d = subs(marble_stream("a-b---c|").timeout(200, O.just('timeout')))
# this also works with absolute time. See docstring:
rst(O.timeout_with_selector)
d = subs(marble_stream("2-2-1-1|")\
.timeout_with_selector(
# you get the value and can adjust the timeout accordingly:
timeout_duration_mapper=lambda x: O.timer(100 * int(x)),
other=O.just('timeout')))
rst(O.catch_exception)
fubar1 = O.on_error('Ups')
fubar2 = O.on_error('Argh')
good = O.just(42)
d = subs(O.catch(fubar1, fubar2, good))
rst(O.on_error_resume_next)
bucket = [0]
def emitter(obs):
v = bucket[-1]
bucket.append(v)
for i in range(0, len(bucket) + 2):
obs.on_next(i)
if len(bucket) > 2:
log('notify error')
obs.on_error("ups")
log('notify complete')
obs.on_completed()
d = subs(O.on_error_resume_next(O.just('running'),
O.create(emitter),
O.create(emitter),
O.just('all good')
))
rst(O.retry)
ts = time.time()
def emit(obs):
dt = time.time() - ts
obs.on_next('try %s' % dt)
if dt < 1:
sleep(0.2)
log('error')
obs.on_error('ups')
obs.on_completed()
d = subs(O.create(emit).retry(10))
rst(O.using)
#d = subs(O.interval(1000).take(2))
lifetime = 2000
def result(disposable_resource_fac):
return O.just(disposable_resource_fac).delay(lifetime)
d2 = subs(O.using(lambda: subs(O.interval(100).take(1000), name='resource fac\n'),
result), name='outer stream\n')
rst(O.start)
def starter():
# called only once, async:
return 'start: ', time.time()
s = O.start(starter).concat(O.from_('abc'))
d = subs(s, name='sub1')
d = subs(s, name='sub2')
rst(O.start_async)
def emit(obs):
for ev in 'first', 'second':
sleep(.2)
log('emitting', ev)
obs.on_next(ev)
# vital for the future to get done:
obs.on_completed()
def future():
# only called once:
log('called future')
future = trollius.Future()
future.set_result(('42', time.time()))
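    # caution (added note): calling set_exception after set_result raises
    # InvalidStateError; the except clause below swallows it and prints the skip message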
future.set_exception(Exception('ups'))
return future
try:
# required for py2 (backport of guidos' tulip stuffs, now asyncio)
# caution: people say this is not production ready and will never be.
import trollius
s = O.start_async(future)
d = subs(s, name='subs1')
# same result:
d = subs(s, name='subs2')
except Exception as ex: # notebook should always run all cells
print ('%s skipping this; pip install trollius required' % ex)
rst(O.to_async)
d = subs(O.to_async(lambda x, y: x + y)(4, 3) )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I want an operator to operate on a particular Scheduler
Step2: ...when it notifies observers observe_on
Step3: I want an Observable to invoke a particular action when certain events occur do_action/tap, finally_action
Step4: I want an Observable that will notify observers of an error throw
Step5: ...if a specified period of time elapses without it emitting an item timeout / timeout_with_selector
Step6: I want an Observable to recover gracefully
Step7: ... by attempting to resubscribe to the upstream Observable retry
Step8: I want to create a resource that has the same lifespan as the Observable using
Step9: I want to subscribe to an Observable and receive a Future that blocks until the Observable completes start, start_async, to_async
|
3,733
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from numpy.linalg import inv, norm
def objective(P, q, r, x):
Return the value of the Standard form QP using the current value of x.
return 0.5 * np.dot(x, np.dot(P, x)) + np.dot(q, x) + r
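# Sanity check (added sketch): with P = I, q = 0, r = 0 the objective reduces
# to 0.5 * ||x||^2, so x = (3, 4) should give 12.5.
assert objective(np.eye(2), np.zeros(2), 0.0, np.array([3.0, 4.0])) == 12.5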
def qp_admm(P, q, r, lb, ub,
max_iter=1000,
rho=1.0,
alpha=1.2,
atol=1e-4,
rtol=1e-2):
n = P.shape[0]
x = np.zeros(n)
z = np.zeros(n)
u = np.zeros(n)
history = []
R = inv(P + rho * np.eye(n))
for k in range(1, max_iter+1):
x = np.dot(R, (z - u) - q)
# z-update with relaxation
z_old = z
x_hat = alpha * x +(1 - alpha) * z_old
z = np.minimum(ub, np.maximum(lb, x_hat + u))
# u-update
u = u + (x_hat - z)
# diagnostics, and termination checks
objval = objective(P, q, r, x)
r_norm = norm(x - z)
s_norm = norm(-rho * (z - z_old))
eps_pri = np.sqrt(n) * atol + rtol * np.maximum(norm(x), norm(-z))
eps_dual = np.sqrt(n)* atol + rtol * norm(rho*u)
history.append({
'objval' : objval,
'r_norm' : r_norm,
's_norm' : s_norm,
'eps_pri' : eps_pri,
'eps_dual': eps_dual,
})
if r_norm < eps_pri and s_norm < eps_dual:
print('Optimization terminated after {} iterations'.format(k))
            break
history = pd.DataFrame(history)
return x, history
import cvxpy as cvx
def qp_cvxpy(P, q, r, lb, ub,
max_iter=1000,
atol=1e-4,
rtol=1e-2):
n = P.shape[0]
# The variable we want to solve for
x = cvx.Variable(n)
constraints = [x >= cvx.Constant(lb), x <= cvx.Constant(ub)]
# Construct the QP expression using CVX Primitives
# Note that in the CVX-meta language '*' of vectors of matrices indicates dot product,
# not elementwise multiplication
expr = cvx.Constant(0.5) * cvx.quad_form(x, cvx.Constant(P)) + cvx.Constant(q) * x + cvx.Constant(r)
qp = cvx.Problem(cvx.Minimize(expr), constraints=constraints)
qp.solve(max_iters=max_iter, abstol=atol, reltol=rtol, verbose=True)
# The result is a Matrix object. Make it an NDArray and drop of 2nd dimension i.e. make it a vector.
x_opt = np.array(x.value).squeeze()
return x_opt
import requests
from statsmodels.stats.moment_helpers import corr2cov
from functools import lru_cache
@lru_cache(maxsize=5)
def get_cov(filename):
url = r'http://people.brunel.ac.uk/~mastjjb/jeb/orlib/files/{}.txt'.format(filename)
data = requests.get(url).text
lines = [line.strip() for line in data.split('\n')]
# First line is the number of assets
n_assets = int(lines[0])
# Next n_assets lines contain the space separated mean and stddev. of returns for each asset
means_and_sds = pd.DataFrame(
data=np.nan,
index=range(0, n_assets),
columns=['ret_mean', 'ret_std'])
# Next n_assetsC2 lines contain the 1-based row and column index and the corresponding correlation
for i in range(0, n_assets):
mean, sd = map(float, lines[1+i].split())
means_and_sds.loc[i, ['ret_mean', 'ret_std']] = [mean, sd]
n_corrs = (n_assets * (n_assets + 1)) // 2
corrs = pd.DataFrame(index=range(n_assets), columns=range(n_assets), data=np.nan)
for i in range(0, n_corrs):
row, col, corr = lines[n_assets + 1 + i].split()
row, col = int(row)-1, int(col)-1
corr = float(corr)
corrs.loc[row, col] = corr
corrs.loc[col, row] = corr
cov = corr2cov(corrs, means_and_sds.ret_std)
return cov
from numpy.random import RandomState
rng = RandomState(0)
P = get_cov('port1')
n = P.shape[0]
alphas = rng.uniform(-0.4, 0.4, size=n)
q = -alphas
ub = np.ones_like(q)
lb = np.zeros_like(q)
r = 0
%%time
x_opt_admm, history = qp_admm(P, q, r, lb, ub)
fig, ax = plt.subplots(history.shape[1], 1, figsize=(10, 8))
ax = history.plot(subplots=True, ax=ax, rot=0)
%%time
x_opt_cvxpy = qp_cvxpy(P, q, r, lb, ub)
holdings = pd.DataFrame(np.column_stack([x_opt_admm, x_opt_cvxpy]), columns=['opt_admm', 'opt_cvxpy'])
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
ax = holdings.plot(kind='bar', ax=ax, rot=0)
labels = ax.set(xlabel='Assets', ylabel='Holdings')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Pure-Python ADMM Implementation
Step3: QP Solver using CVXPY
Step4: Generate Optimal Portfolio Holdings
Step5: Set up the Portfolio Optimization problem as a QP
Step6: Using ADMM
Step7: Using CVXPY
Step8: Optimal Holdings Comparison
|
3,734
|
<ASSISTANT_TASK:>
Python Code:
!pip -q install rdkit-pypi==2021.9.4
import ast
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from rdkit import Chem, RDLogger
from rdkit.Chem import BondType
from rdkit.Chem.Draw import MolsToGridImage
RDLogger.DisableLog("rdApp.*")
csv_path = keras.utils.get_file(
"/content/250k_rndm_zinc_drugs_clean_3.csv",
"https://raw.githubusercontent.com/aspuru-guzik-group/chemical_vae/master/models/zinc_properties/250k_rndm_zinc_drugs_clean_3.csv",
)
df = pd.read_csv("/content/250k_rndm_zinc_drugs_clean_3.csv")
df["smiles"] = df["smiles"].apply(lambda s: s.replace("\n", ""))
df.head()
SMILE_CHARSET = '["C", "B", "F", "I", "H", "O", "N", "S", "P", "Cl", "Br"]'
bond_mapping = {"SINGLE": 0, "DOUBLE": 1, "TRIPLE": 2, "AROMATIC": 3}
bond_mapping.update(
{0: BondType.SINGLE, 1: BondType.DOUBLE, 2: BondType.TRIPLE, 3: BondType.AROMATIC}
)
SMILE_CHARSET = ast.literal_eval(SMILE_CHARSET)
MAX_MOLSIZE = max(df["smiles"].str.len())
SMILE_to_index = dict((c, i) for i, c in enumerate(SMILE_CHARSET))
index_to_SMILE = dict((i, c) for i, c in enumerate(SMILE_CHARSET))
atom_mapping = dict(SMILE_to_index)
atom_mapping.update(index_to_SMILE)
BATCH_SIZE = 100
EPOCHS = 10
VAE_LR = 5e-4
NUM_ATOMS = 120 # Maximum number of atoms
ATOM_DIM = len(SMILE_CHARSET) # Number of atom types
BOND_DIM = 4 + 1 # Number of bond types
LATENT_DIM = 435 # Size of the latent space
def smiles_to_graph(smiles):
# Converts SMILES to molecule object
molecule = Chem.MolFromSmiles(smiles)
# Initialize adjacency and feature tensor
adjacency = np.zeros((BOND_DIM, NUM_ATOMS, NUM_ATOMS), "float32")
features = np.zeros((NUM_ATOMS, ATOM_DIM), "float32")
# loop over each atom in molecule
for atom in molecule.GetAtoms():
i = atom.GetIdx()
atom_type = atom_mapping[atom.GetSymbol()]
features[i] = np.eye(ATOM_DIM)[atom_type]
# loop over one-hop neighbors
for neighbor in atom.GetNeighbors():
j = neighbor.GetIdx()
bond = molecule.GetBondBetweenAtoms(i, j)
bond_type_idx = bond_mapping[bond.GetBondType().name]
adjacency[bond_type_idx, [i, j], [j, i]] = 1
# Where no bond, add 1 to last channel (indicating "non-bond")
# Notice: channels-first
adjacency[-1, np.sum(adjacency, axis=0) == 0] = 1
# Where no atom, add 1 to last column (indicating "non-atom")
features[np.where(np.sum(features, axis=1) == 0)[0], -1] = 1
return adjacency, features
def graph_to_molecule(graph):
# Unpack graph
adjacency, features = graph
# RWMol is a molecule object intended to be edited
molecule = Chem.RWMol()
# Remove "no atoms" & atoms with no bonds
keep_idx = np.where(
(np.argmax(features, axis=1) != ATOM_DIM - 1)
& (np.sum(adjacency[:-1], axis=(0, 1)) != 0)
)[0]
features = features[keep_idx]
adjacency = adjacency[:, keep_idx, :][:, :, keep_idx]
# Add atoms to molecule
for atom_type_idx in np.argmax(features, axis=1):
atom = Chem.Atom(atom_mapping[atom_type_idx])
_ = molecule.AddAtom(atom)
# Add bonds between atoms in molecule; based on the upper triangles
# of the [symmetric] adjacency tensor
(bonds_ij, atoms_i, atoms_j) = np.where(np.triu(adjacency) == 1)
for (bond_ij, atom_i, atom_j) in zip(bonds_ij, atoms_i, atoms_j):
if atom_i == atom_j or bond_ij == BOND_DIM - 1:
continue
bond_type = bond_mapping[bond_ij]
molecule.AddBond(int(atom_i), int(atom_j), bond_type)
# Sanitize the molecule; for more information on sanitization, see
# https://www.rdkit.org/docs/RDKit_Book.html#molecular-sanitization
flag = Chem.SanitizeMol(molecule, catchErrors=True)
# Let's be strict. If sanitization fails, return None
if flag != Chem.SanitizeFlags.SANITIZE_NONE:
return None
return molecule
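# Round-trip sanity check (added sketch) on the first molecule of the dataframe:
# encode the SMILES string into a graph, decode it back, and compare.
sample_smiles = df["smiles"].iloc[0]
sample_molecule = graph_to_molecule(smiles_to_graph(sample_smiles))
print(sample_smiles, "->", Chem.MolToSmiles(sample_molecule) if sample_molecule else None)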
train_df = df.sample(frac=0.75, random_state=42) # random state is a seed value
train_df.reset_index(drop=True, inplace=True)
adjacency_tensor, feature_tensor, qed_tensor = [], [], []
for idx in range(8000):
adjacency, features = smiles_to_graph(train_df.loc[idx]["smiles"])
qed = train_df.loc[idx]["qed"]
adjacency_tensor.append(adjacency)
feature_tensor.append(features)
qed_tensor.append(qed)
adjacency_tensor = np.array(adjacency_tensor)
feature_tensor = np.array(feature_tensor)
qed_tensor = np.array(qed_tensor)
class RelationalGraphConvLayer(keras.layers.Layer):
def __init__(
self,
units=128,
activation="relu",
use_bias=False,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
**kwargs
):
super().__init__(**kwargs)
self.units = units
self.activation = keras.activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = keras.regularizers.get(bias_regularizer)
def build(self, input_shape):
bond_dim = input_shape[0][1]
atom_dim = input_shape[1][2]
self.kernel = self.add_weight(
shape=(bond_dim, atom_dim, self.units),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
name="W",
dtype=tf.float32,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(bond_dim, 1, self.units),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
name="b",
dtype=tf.float32,
)
self.built = True
def call(self, inputs, training=False):
adjacency, features = inputs
# Aggregate information from neighbors
x = tf.matmul(adjacency, features[:, None, :, :])
# Apply linear transformation
x = tf.matmul(x, self.kernel)
if self.use_bias:
x += self.bias
# Reduce bond types dim
x_reduced = tf.reduce_sum(x, axis=1)
# Apply non-linear transformation
return self.activation(x_reduced)
def get_encoder(
gconv_units, latent_dim, adjacency_shape, feature_shape, dense_units, dropout_rate
):
adjacency = keras.layers.Input(shape=adjacency_shape)
features = keras.layers.Input(shape=feature_shape)
# Propagate through one or more graph convolutional layers
features_transformed = features
for units in gconv_units:
features_transformed = RelationalGraphConvLayer(units)(
[adjacency, features_transformed]
)
# Reduce 2-D representation of molecule to 1-D
x = keras.layers.GlobalAveragePooling1D()(features_transformed)
# Propagate through one or more densely connected layers
for units in dense_units:
x = layers.Dense(units, activation="relu")(x)
x = layers.Dropout(dropout_rate)(x)
z_mean = layers.Dense(latent_dim, dtype="float32", name="z_mean")(x)
log_var = layers.Dense(latent_dim, dtype="float32", name="log_var")(x)
encoder = keras.Model([adjacency, features], [z_mean, log_var], name="encoder")
return encoder
def get_decoder(dense_units, dropout_rate, latent_dim, adjacency_shape, feature_shape):
latent_inputs = keras.Input(shape=(latent_dim,))
x = latent_inputs
for units in dense_units:
x = keras.layers.Dense(units, activation="tanh")(x)
x = keras.layers.Dropout(dropout_rate)(x)
# Map outputs of previous layer (x) to [continuous] adjacency tensors (x_adjacency)
x_adjacency = keras.layers.Dense(tf.math.reduce_prod(adjacency_shape))(x)
x_adjacency = keras.layers.Reshape(adjacency_shape)(x_adjacency)
# Symmetrify tensors in the last two dimensions
x_adjacency = (x_adjacency + tf.transpose(x_adjacency, (0, 1, 3, 2))) / 2
x_adjacency = keras.layers.Softmax(axis=1)(x_adjacency)
# Map outputs of previous layer (x) to [continuous] feature tensors (x_features)
x_features = keras.layers.Dense(tf.math.reduce_prod(feature_shape))(x)
x_features = keras.layers.Reshape(feature_shape)(x_features)
x_features = keras.layers.Softmax(axis=2)(x_features)
decoder = keras.Model(
latent_inputs, outputs=[x_adjacency, x_features], name="decoder"
)
return decoder
class Sampling(layers.Layer):
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_log_var)[0]
dim = tf.shape(z_log_var)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class MoleculeGenerator(keras.Model):
def __init__(self, encoder, decoder, max_len, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.property_prediction_layer = layers.Dense(1)
self.max_len = max_len
self.train_total_loss_tracker = keras.metrics.Mean(name="train_total_loss")
self.val_total_loss_tracker = keras.metrics.Mean(name="val_total_loss")
def train_step(self, data):
adjacency_tensor, feature_tensor, qed_tensor = data[0]
graph_real = [adjacency_tensor, feature_tensor]
self.batch_size = tf.shape(qed_tensor)[0]
with tf.GradientTape() as tape:
z_mean, z_log_var, qed_pred, gen_adjacency, gen_features = self(
graph_real, training=True
)
graph_generated = [gen_adjacency, gen_features]
total_loss = self._compute_loss(
z_log_var, z_mean, qed_tensor, qed_pred, graph_real, graph_generated
)
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
self.train_total_loss_tracker.update_state(total_loss)
return {"loss": self.train_total_loss_tracker.result()}
def _compute_loss(
self, z_log_var, z_mean, qed_true, qed_pred, graph_real, graph_generated
):
adjacency_real, features_real = graph_real
adjacency_gen, features_gen = graph_generated
adjacency_loss = tf.reduce_mean(
tf.reduce_sum(
keras.losses.categorical_crossentropy(adjacency_real, adjacency_gen),
axis=(1, 2),
)
)
features_loss = tf.reduce_mean(
tf.reduce_sum(
keras.losses.categorical_crossentropy(features_real, features_gen),
axis=(1),
)
)
kl_loss = -0.5 * tf.reduce_sum(
1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var), 1
)
kl_loss = tf.reduce_mean(kl_loss)
property_loss = tf.reduce_mean(
keras.losses.binary_crossentropy(qed_true, qed_pred)
)
graph_loss = self._gradient_penalty(graph_real, graph_generated)
return kl_loss + property_loss + graph_loss + adjacency_loss + features_loss
def _gradient_penalty(self, graph_real, graph_generated):
# Unpack graphs
adjacency_real, features_real = graph_real
adjacency_generated, features_generated = graph_generated
# Generate interpolated graphs (adjacency_interp and features_interp)
alpha = tf.random.uniform([self.batch_size])
alpha = tf.reshape(alpha, (self.batch_size, 1, 1, 1))
adjacency_interp = (adjacency_real * alpha) + (1 - alpha) * adjacency_generated
alpha = tf.reshape(alpha, (self.batch_size, 1, 1))
features_interp = (features_real * alpha) + (1 - alpha) * features_generated
# Compute the logits of interpolated graphs
with tf.GradientTape() as tape:
tape.watch(adjacency_interp)
tape.watch(features_interp)
_, _, logits, _, _ = self(
[adjacency_interp, features_interp], training=True
)
# Compute the gradients with respect to the interpolated graphs
grads = tape.gradient(logits, [adjacency_interp, features_interp])
# Compute the gradient penalty
grads_adjacency_penalty = (1 - tf.norm(grads[0], axis=1)) ** 2
grads_features_penalty = (1 - tf.norm(grads[1], axis=2)) ** 2
return tf.reduce_mean(
tf.reduce_mean(grads_adjacency_penalty, axis=(-2, -1))
+ tf.reduce_mean(grads_features_penalty, axis=(-1))
)
def inference(self, batch_size):
z = tf.random.normal((batch_size, LATENT_DIM))
        reconstruction_adjacency, reconstruction_features = self.decoder.predict(z)
# obtain one-hot encoded adjacency tensor
adjacency = tf.argmax(reconstruction_adjacency, axis=1)
adjacency = tf.one_hot(adjacency, depth=BOND_DIM, axis=1)
# Remove potential self-loops from adjacency
adjacency = tf.linalg.set_diag(adjacency, tf.zeros(tf.shape(adjacency)[:-1]))
# obtain one-hot encoded feature tensor
features = tf.argmax(reconstruction_features, axis=2)
features = tf.one_hot(features, depth=ATOM_DIM, axis=2)
return [
graph_to_molecule([adjacency[i].numpy(), features[i].numpy()])
for i in range(batch_size)
]
def call(self, inputs):
z_mean, log_var = self.encoder(inputs)
z = Sampling()([z_mean, log_var])
gen_adjacency, gen_features = self.decoder(z)
property_pred = self.property_prediction_layer(z_mean)
return z_mean, log_var, property_pred, gen_adjacency, gen_features
vae_optimizer = tf.keras.optimizers.Adam(learning_rate=VAE_LR)
encoder = get_encoder(
gconv_units=[9],
adjacency_shape=(BOND_DIM, NUM_ATOMS, NUM_ATOMS),
feature_shape=(NUM_ATOMS, ATOM_DIM),
latent_dim=LATENT_DIM,
dense_units=[512],
dropout_rate=0.0,
)
decoder = get_decoder(
dense_units=[128, 256, 512],
dropout_rate=0.2,
latent_dim=LATENT_DIM,
adjacency_shape=(BOND_DIM, NUM_ATOMS, NUM_ATOMS),
feature_shape=(NUM_ATOMS, ATOM_DIM),
)
model = MoleculeGenerator(encoder, decoder, MAX_MOLSIZE)
model.compile(vae_optimizer)
history = model.fit([adjacency_tensor, feature_tensor, qed_tensor], epochs=EPOCHS)
molecules = model.inference(1000)
MolsToGridImage(
[m for m in molecules if m is not None][:1000], molsPerRow=5, subImgSize=(260, 160)
)
def plot_latent(vae, data, labels):
# display a 2D plot of the property in the latent space
z_mean, _ = vae.encoder.predict(data)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.show()
plot_latent(model, [adjacency_tensor[:8000], feature_tensor[:8000]], qed_tensor[:8000])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dataset
Step2: Hyperparameters
Step3: Generate training set
Step4: Build the Encoder and Decoder
Step5: Build the Sampling layer
Step6: Build the VAE
Step7: Train the model
Step8: Inference
Step9: Display latent space clusters with respect to molecular properties (QED)
|
3,735
|
<ASSISTANT_TASK:>
Python Code:
# Import required libraries
from tpot import TPOTClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
# Load the data
titanic = pd.read_csv('data/titanic_train.csv')
titanic.head(5)
titanic.groupby('Sex').Survived.value_counts()
titanic.groupby(['Pclass','Sex']).Survived.value_counts()
ct = pd.crosstab([titanic.Pclass, titanic.Sex], titanic.Survived.astype(float))  # avoid shadowing the built-in id()
ct.div(ct.sum(1).astype(float), 0)
titanic.rename(columns={'Survived': 'class'}, inplace=True)
titanic.dtypes
for cat in ['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']:
print("Number of levels in category '{0}': \b {1:2.2f} ".format(cat, titanic[cat].unique().size))
for cat in ['Sex', 'Embarked']:
print("Levels for catgeory '{0}': {1}".format(cat, titanic[cat].unique()))
titanic['Sex'] = titanic['Sex'].map({'male':0,'female':1})
titanic['Embarked'] = titanic['Embarked'].map({'S':0,'C':1,'Q':2})
titanic = titanic.fillna(-999)
pd.isnull(titanic).any()
from sklearn.preprocessing import MultiLabelBinarizer
mlb = MultiLabelBinarizer()
CabinTrans = mlb.fit_transform([{str(val)} for val in titanic['Cabin'].values])
CabinTrans
titanic_new = titanic.drop(['Name','Ticket','Cabin','class'], axis=1)
assert (len(titanic['Cabin'].unique()) == len(mlb.classes_)), "Not Equal" #check correct encoding done
titanic_new = np.hstack((titanic_new.values,CabinTrans))
np.isnan(titanic_new).any()
titanic_new[0].size
titanic_class = titanic['class'].values
training_indices, validation_indices = train_test_split(titanic.index, stratify=titanic_class, train_size=0.75, test_size=0.25)
training_indices.size, validation_indices.size
tpot = TPOTClassifier(verbosity=2, max_time_mins=2, max_eval_time_mins=0.04, population_size=40)
tpot.fit(titanic_new[training_indices], titanic_class[training_indices])
tpot.score(titanic_new[validation_indices], titanic.loc[validation_indices, 'class'].values)
tpot.export('tpot_titanic_pipeline.py')
# %load tpot_titanic_pipeline.py
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_classes, testing_classes = \
train_test_split(features, tpot_data['target'], random_state=None)
exported_pipeline = RandomForestClassifier(bootstrap=False, max_features=0.4, min_samples_leaf=1, min_samples_split=9)
exported_pipeline.fit(training_features, training_classes)
results = exported_pipeline.predict(testing_features)
# Read in the submission dataset
titanic_sub = pd.read_csv('data/titanic_test.csv')
titanic_sub.describe()
for var in ['Cabin']: #,'Name','Ticket']:
new = list(set(titanic_sub[var]) - set(titanic[var]))
    titanic_sub.loc[titanic_sub[var].isin(new), var] = -999
titanic_sub['Sex'] = titanic_sub['Sex'].map({'male':0,'female':1})
titanic_sub['Embarked'] = titanic_sub['Embarked'].map({'S':0,'C':1,'Q':2})
titanic_sub = titanic_sub.fillna(-999)
pd.isnull(titanic_sub).any()
from sklearn.preprocessing import MultiLabelBinarizer
mlb = MultiLabelBinarizer()
SubCabinTrans = mlb.fit([{str(val)} for val in titanic['Cabin'].values]).transform([{str(val)} for val in titanic_sub['Cabin'].values])
titanic_sub = titanic_sub.drop(['Name','Ticket','Cabin'], axis=1)
# Form the new submission data set
titanic_sub_new = np.hstack((titanic_sub.values,SubCabinTrans))
np.any(np.isnan(titanic_sub_new))
# Ensure equal number of features in both the final training and submission dataset
assert (titanic_new.shape[1] == titanic_sub_new.shape[1]), "Not Equal"
# Generate the predictions
submission = tpot.predict(titanic_sub_new)
# Create the submission file
final = pd.DataFrame({'PassengerId': titanic_sub['PassengerId'], 'Survived': submission})
final.to_csv('data/submission.csv', index = False)
final.shape
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Exploration
Step2: Data Munging
Step3: At present, TPOT requires all the data to be in numerical format. As we can see below, our data set has 5 categorical variables which contain non-numerical values
Step4: We then check the number of levels that each of the five categorical variables have.
Step5: As we can see, Sex and Embarked have few levels. Let's find out what they are.
Step6: We then code these levels manually into numerical values. For nan i.e. the missing values, we simply replace them with a placeholder value (-999). In fact, we perform this replacement for the entire data set.
Step7: Since Name and Ticket have so many levels, we drop them from our analysis for the sake of simplicity. For Cabin, we encode the levels as digits using Scikit-learn's MultiLabelBinarizer and treat them as new features.
Step8: Drop the unused features from the dataset.
Step9: We then add the encoded features to form the final dataset to be used with TPOT.
Step10: Keeping in mind that the final dataset is in the form of a numpy array, we can check the number of features in the final dataset as follows.
Step11: Finally we store the class labels, which we need to predict, in a separate variable.
Step12: Data Analysis using TPOT
Step13: After that, we proceed to calling the fit, score and export functions on our training dataset. To get a better idea of how these functions work, refer the TPOT documentation here.
Step14: Let's have a look at the generated code. As we can see, the random forest classifier performed the best on the given dataset out of all the other models that TPOT currently evaluates on. If we ran TPOT for more generations, then the score should improve further.
Step15: Make predictions on the submission data
Step16: The most important step here is to check for new levels in the categorical variables of the submission dataset that are absent in the training set. We identify them and set them to our placeholder value of '-999', i.e., we treat them as missing values. This ensures training consistency, as otherwise the model does not know what to do with the new levels in the submission dataset.
Step17: We then carry out the data munging steps as done earlier for the training dataset.
Step18: While calling MultiLabelBinarizer for the submission data set, we first fit on the training set again to learn the levels and then transform the submission dataset values. This further ensures that only those levels that were present in the training dataset are transformed. If new levels are still found in the submission dataset then it will return an error and we need to go back and check our earlier step of replacing new levels with the placeholder value.
|
3,736
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame(dict(col1=[[1, 2, 3],[4,5]]))
def g(df):
for i in df.index:
df.loc[i, 'col1'] = df.loc[i, 'col1'][::-1]
L = df.col1.sum()
L = map(lambda x:str(x), L)
return ','.join(L)
result = g(df.copy())
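# An equivalent one-liner (added sketch) on the untouched df: reverse each list,
# flatten, and join.
result_alt = ','.join(str(v) for lst in df['col1'] for v in lst[::-1])
assert result_alt == result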
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
3,737
|
<ASSISTANT_TASK:>
Python Code:
## Can build up a dict by starting with the the empty dict {}
## and storing key/value pairs into the dict like this:
## dict[key] = value-for-that-key
dict = {}
dict['a'] = 'alpha'
dict['g'] = 'gamma'
dict['o'] = 'omega'
print dict
print dict['a']
dict['a'] = 6
print dict['a']
'a' in dict
print dict['z'] ## Throws KeyError
if 'z' in dict: print dict['z'] ## Avoid KeyError
print dict.get('z') ## None (instead of KeyError)
## By default, iterating over a dict iterates over its keys.
## Note that the keys are in a random order.
for key in dict: print key
## Exactly the same as above
for key in dict.keys(): print key
## Get the .keys() list:
print dict.keys()
## Likewise, there's a .values() list of values
print dict.values() ## ['alpha', 'omega', 'gamma']
## Common case -- loop over the keys in sorted order,
## accessing each key/value
for key in sorted(dict.keys()):
print key, dict[key]
## .items() is the dict expressed as (key, value) tuples
print dict.items()
## This loop syntax accesses the whole dict by looping
## over the .items() tuple list, accessing one (key, value)
## pair on each iteration.
for k, v in dict.items(): print k, '>', v
hash = {}
hash['word'] = 'garfield'
hash['count'] = 42
s = 'I want %(count)d copies of %(word)s' % hash
s
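## The same substitution with str.format (available since Python 2.6):
s2 = 'I want {count} copies of {word}'.format(**hash)
s2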
var = 6
del var # var no more!
var
list = ['a', 'b', 'c', 'd']
del list[0] ## Delete first element
del list[-2:] ## Delete last two elements
print list
dict = {'a':1, 'b':2, 'c':3}
del dict['b'] ## Delete 'b' entry
print dict
# Echo the contents of a file
f = open('data/poem.txt', 'rU')
for line in f: ## iterates over the lines of the file
print line, ## trailing , so print does not add an end-of-line char
## since 'line' already includes the end-of line.
f.close()
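## Equivalent echo using a with-block, which closes the file automatically:
with open('data/poem.txt', 'rU') as f:
    for line in f:
        print line,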
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A for loop on a dictionary iterates over its keys by default. The keys will appear in an arbitrary order. The methods dict.keys() and dict.values() return lists of the keys or values explicitly. There's also an items() which returns a list of (key, value) tuples, which is the most efficient way to examine all the key value data in the dictionary. All of these lists can be passed to the sorted() function.
Step2: There are "iter" variants of these methods called iterkeys(), itervalues() and iteritems() which avoid the cost of constructing the whole list -- a performance win if the data is huge. However, I generally prefer the plain keys() and values() methods with their sensible names.
Step3: Del
Step4: Files
|
3,738
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
# Input and target placeholders
inputs_ = tf.placeholder(tf.float32,[None, 28, 28, 1])
targets_ = tf.placeholder(tf.float32, [None, 28, 28, 1])
### Encoder
conv1 = tf.layers.conv2d(
inputs=inputs_,
filters=16,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, padding="same")
# Now 14x14x16
conv2 = tf.layers.conv2d(
inputs=maxpool1,
filters=8,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, padding="same")
# Now 7x7x8
conv3 = tf.layers.conv2d(
inputs=maxpool2,
filters=8,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2, padding="same")
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, [7, 7])
# Now 7x7x8
conv4 = tf.layers.conv2d(
inputs=upsample1,
filters=8,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, [14, 14])
# Now 14x14x8
conv5 = tf.layers.conv2d(
inputs=upsample2,
filters=8,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, [28, 28])
# Now 28x28x8
conv6 = tf.layers.conv2d(
inputs=upsample3,
filters=16,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Now 28x28x16
logits = tf.layers.conv2d(
inputs=conv6,
filters=1,
kernel_size=[5, 5],
padding="same",
activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits)
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
sess = tf.Session()
epochs = 2
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
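# A quick size check (added note): the encoder compresses each 28*28 = 784-pixel
# image into a 4*4*8 = 128-value bottleneck, roughly a 6x reduction.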
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(
inputs=inputs_,
filters=32,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, padding="same")
# Now 14x14x32
conv2 = tf.layers.conv2d(
inputs=maxpool1,
filters=32,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, padding="same")
# Now 7x7x32
conv3 = tf.layers.conv2d(
inputs=maxpool2,
filters=16,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2, padding="same")
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, [7, 7])
# Now 7x7x16
conv4 = tf.layers.conv2d(
inputs=upsample1,
filters=16,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, [14, 14])
# Now 14x14x16
conv5 = tf.layers.conv2d(
inputs=upsample2,
filters=32,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, [28, 28])
# Now 28x28x32
conv6 = tf.layers.conv2d(
inputs=upsample3,
filters=32,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(
inputs=conv6,
filters=1,
kernel_size=[3, 3],
padding="same",
activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded =tf.nn.sigmoid(logits)
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets_)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 5
batch_size = 200
# Set's how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Architecture
Step2: Training
Step3: Denoising
Step4: Checking out the performance
|
3,739
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from bokeh.embed import file_html
from bokeh.io import output_notebook, show
from bokeh.layouts import layout
from bokeh.models import (
ColumnDataSource, Plot, Circle, Range1d, LinearAxis, HoverTool,
Text, SingleIntervalTicker, Slider, CustomJS)
from bokeh.palettes import Spectral6
output_notebook()
import bokeh.sampledata
bokeh.sampledata.download()
def process_data():
from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions
# Make the column names ints not strings for handling
columns = list(fertility.columns)
years = list(range(int(columns[0]), int(columns[-1])))
rename_dict = dict(zip(columns, years))
fertility = fertility.rename(columns=rename_dict)
life_expectancy = life_expectancy.rename(columns=rename_dict)
population = population.rename(columns=rename_dict)
regions = regions.rename(columns=rename_dict)
# Turn population into bubble sizes. Use min_size and factor to tweak.
scale_factor = 200
population_size = np.sqrt(population / np.pi) / scale_factor
min_size = 3
population_size = population_size.where(population_size >= min_size).fillna(min_size)
# Use pandas categories and categorize & color the regions
regions.Group = regions.Group.astype('category')
regions_list = list(regions.Group.cat.categories)
def get_color(r):
return Spectral6[regions_list.index(r.Group)]
regions['region_color'] = regions.apply(get_color, axis=1)
return fertility, life_expectancy, population_size, regions, years, regions_list
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions = process_data()
sources = {}
region_color = regions_df['region_color']
region_color.name = 'region_color'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_color], axis=1)
sources['_' + str(year)] = ColumnDataSource(new_df)
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
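# js_source_array is now a JS object literal such as "{1964: _1964, 1965: _1965, ...}",
# mapping each year to the name of its ColumnDataSource.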
# Set up the plot
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)
plot = Plot(
x_range=xdr,
y_range=ydr,
plot_width=800,
plot_height=400,
outline_line_color=None,
toolbar_location=None,
min_border=20,
)
AXIS_FORMATS = dict(
minor_tick_in=None,
minor_tick_out=None,
major_tick_in=None,
major_label_text_font_size="10pt",
major_label_text_font_style="normal",
axis_label_text_font_size="10pt",
axis_line_color='#AAAAAA',
major_tick_line_color='#AAAAAA',
major_label_text_color='#666666',
major_tick_line_cap="round",
axis_line_cap="round",
axis_line_width=1,
major_tick_line_width=1,
)
xaxis = LinearAxis(ticker=SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# Add the year in background (add before circle)
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Add the circle
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
x='fertility', y='life', size='population',
fill_color='region_color', fill_alpha=0.8,
line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
text_x = 7
text_y = 95
for i, region in enumerate(regions):
plot.add_glyph(Text(x=text_x, y=text_y, text=[region], text_font_size='10pt', text_color='#666666'))
plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], size=10, line_color=None, fill_alpha=0.8))
text_y = text_y - 5
# Add the slider
code = """
    var year = slider.get('value'),
        sources = %s,
        new_source_data = sources[year].get('data');
    renderer_source.set('data', new_source_data);
    text_source.set('data', {'year': [String(year)]});
""" % js_source_array
callback = CustomJS(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=years[0], step=1, title="Year", callback=callback)
callback.args["renderer_source"] = renderer_source
callback.args["slider"] = slider
callback.args["text_source"] = text_source
# Stick the plot and the slider together
show(layout([[plot], [slider]], sizing_mode='scale_width'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up the data
Step2: sources looks like this
Step3: Build the plot
Step4: Build the axes
Step5: Add the background year text
Step6: Add the bubbles and hover
Step7: Add the legend
Step9: Add the slider and callback
Step10: Render together with a slider
|
3,740
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from IPython.display import HTML # intégration notebook
%matplotlib inline
def plot_cmap(cmap, ncolor=6):
A convenient function to plot colors of a matplotlib cmap
Args:
ncolor (int): number of color to show
cmap: a cmap object or a matplotlib color name
if isinstance(cmap, str):
try:
cm = plt.get_cmap(cmap)
except ValueError:
print("WARNINGS :", cmap, " is not a known colormap")
cm = plt.cm.gray
else:
cm = cmap
with plt.rc_context(plt.rcParamsDefault):
fig = plt.figure(figsize=(6, 1), frameon=False)
ax = fig.add_subplot(111)
ax.pcolor(np.linspace(1, ncolor, ncolor).reshape(1, ncolor), cmap=cm)
ax.set_title(cm.name)
xt = ax.set_xticks([])
yt = ax.set_yticks([])
return fig
def show_colors(colors):
Draw a square for each color contained in the colors list
given in argument.
with plt.rc_context(plt.rcParamsDefault):
fig = plt.figure(figsize=(6, 1), frameon=False)
ax = fig.add_subplot(111)
for x, color in enumerate(colors):
ax.add_patch(
mpl.patches.Rectangle(
(x, 0), 1, 1, facecolor=color
)
)
ax.set_xlim((0, len(colors)))
ax.set_ylim((0, 1))
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect("equal")
return fig
plot_cmap("Dark2", 4)
plot_cmap("Dark2", 4).savefig("img/qualitative.png", bbox_inches="tight")
plot_cmap("Blues", 8)
plot_cmap("Blues", 8).savefig("img/sequentielle.png", bbox_inches="tight")
plot_cmap("Blues_r", 8)
plot_cmap("Blues_r", 8).savefig("img/sequentielle_r.png", bbox_inches="tight")
plot_cmap("coolwarm", 9)
plot_cmap("coolwarm", 9).savefig("img/divergente.png", bbox_inches="tight")
plot_cmap(plt.cm.summer, 6)
plot_cmap(plt.cm.summer, 6).savefig("img/summer.png", bbox_inches="tight")
plt.cm.summer(X=42)
print("Max val = ", plt.cm.summer.N)
palette = plt.cm.summer(X=[1, 50, 100, 200], alpha=.6)
print(palette)
show_colors(palette)
show_colors(palette).savefig("img/mpl_palette1.png")
normalize = mpl.colors.Normalize(vmin=-5, vmax=5)
palette = plt.cm.summer(X=normalize([-4, -2, 0, 2, 4]), alpha=1)
print(palette)
show_colors(palette)
show_colors(palette).savefig("img/mpl_palette2.png")
import colorlover as cl
HTML(cl.to_html(cl.scales["4"]["div"]))
cl.scales["4"]["div"]["PuOr"]
cl.to_numeric(cl.scales["4"]["div"]["PuOr"])
import seaborn as sns
current_palette = sns.color_palette()
sns.palplot(current_palette)
sns.palplot(sns.color_palette("husl", 8))
sns.palplot(sns.light_palette("violet", 4))
sns.palplot(sns.diverging_palette(220, 20, n=5))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Functions
Step4: Color models
Step5: Sequential palettes
Step6: Reverse order
Step7: Divergent palettes
Step8: Build a custom color palette
Step9: colormap returns a rgba color.
Step10: X is a float number or a list or an array
Step11: You can change interval values using Normalize
Step12: With colorlover
Step13: Colorlover provides functions to set up a color palette
Step14: Divergent color palette PuOr with 4 colors
Step15: conversion to RGB triplets
Step16: With seaborn
Step17: The documentation is really clear and provides a nice tutorial seaborn color palettes. The following juste provides simple exampl cases.
Step18: A qualitative palette
Step19: A sequential palette
Step20: A divergente palette
|
3,741
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]])
y = np.array([0, 1, 1, 0])
pd.DataFrame(np.hstack((X, y.reshape(-1, 1))), columns=['x1', 'x2', 'y'])
%matplotlib inline
import matplotlib.pyplot as plt
plt.scatter(X[y==0, 0], X[y==0, 1], c='red');
plt.scatter(X[y==1, 0], X[y==1, 1], c='blue');
plt.xlabel('x1');
plt.ylabel('x2');
plt.legend(['0', '1'], loc='center');
def xor_decision_tree(x):
x1, x2 = x
if x1 == 0:
if x2 == 0:
return 0
elif x2 == 1:
return 1
elif x1 == 1:
if x2 == 0:
return 1
elif x2 == 1:
return 0
# accuracy
(list(map(xor_decision_tree, X)) == y).mean()
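# The same problem with scikit-learn (a hedged sketch): a depth-2 decision tree
# can carve out all four XOR cells, which no single linear boundary can.
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(max_depth=2, random_state=0)
tree.fit(X, y)
tree.score(X, y)  # expect 1.0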
import numpy as np
person_std = np.sqrt(10.)
n_simulations = 10000
B = 10
opinions = np.random.normal(scale=person_std, size = (n_simulations, B))
# lets average opinions
avg_opinions = opinions.mean(axis=1)
# let's look at the variance
np.var(avg_opinions)
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(np.linspace(1, 10, num=10), 10/np.linspace(1, 10, num=10));
plt.xlabel('B');
plt.ylabel('Variance');
plt.ylim([0, 12]);
# wisdom of the crowds with opinion correlation
corr = 0.9
B = 1000
cov = (np.ones((B, B)) * corr + np.eye(B)*(1-corr))*(person_std**2)
mu = np.zeros(B)
correlated_opinions = np.random.multivariate_normal(mu, cov, (n_simulations,))
correlated_opinions.mean(1).var()
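# Closed-form check (added): the variance of the mean of B opinions with pairwise
# correlation rho is rho*sigma^2 + (1 - rho)*sigma^2/B.
(person_std**2) * (corr + (1 - corr) / B)  # ~9.001, matching the simulation above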
pl1 = plt.plot(np.linspace(1, 10, num=10), 10/np.linspace(1, 10, num=10));
pl2 = plt.plot(np.linspace(1, 10, num=10), 10*((1-0.9)/np.linspace(1, 10, num=10) + 0.9));
plt.xlabel('B');
plt.ylabel('Variance');
plt.ylim([0, 12]);
plt.legend(['no correlation', '90% correlation']);
import findspark
# use the appropriate path to spark_home
# for example, for cloudera:
try:
findspark.init(spark_home="/opt/cloudera/parcels/SPARK2/lib/spark2")
except:
# or if you have it somewhere else
findspark.init(spark_home="/Users/danielacuna/Downloads/spark-2.0.0-bin-hadoop2.7")
import pyspark
import numpy as np
# conf = pyspark.SparkConf().\
# setAppName('credit-score').\
# setMaster('local[*]')
from pyspark.sql import SparkSession
spark = SparkSession.builder.\
appName("credit-score").\
master('local[2]').\
getOrCreate()
# dataframe functions
from pyspark.sql import functions as fn
!rm -rf cs-training.parquet && wget https://github.com/daniel-acuna/python_data_science_intro/blob/master/data/cs-training.parquet.zip?raw=true -O cs-training.parquet.zip && unzip cs-training.parquet.zip && rm cs-training.parquet.zip
import os
credit_score_df = spark.read.parquet('file:///' + os.path.join(os.getcwd(), 'cs-training.parquet'))
# rename the dependent variable to 'label' (the parquet columns are already numeric)
credit_score_df = credit_score_df.withColumnRenamed('SeriousDlqin2yrs', 'label')
from pyspark.ml.feature import VectorAssembler
training_df, validation_df, testing_df = credit_score_df.randomSplit([0.6, 0.3, 0.1])
# build a pipeline for analysis
va = VectorAssembler().setInputCols(training_df.columns[2:]).setOutputCol('features')
from pyspark.ml.classification import LogisticRegression, RandomForestClassifier
from pyspark.ml import Pipeline
lr = LogisticRegression(regParam=0.1)
lr_pipeline = Pipeline(stages=[va, lr]).fit(training_df)
rf = RandomForestClassifier()
rf_pipeline = Pipeline(stages=[va, rf]).fit(training_df)
from pyspark.ml.evaluation import BinaryClassificationEvaluator
bce = BinaryClassificationEvaluator()
bce.evaluate(lr_pipeline.transform(validation_df))
bce.evaluate(rf_pipeline.transform(validation_df))
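# A tuning sketch (added, not in the original): grid-search the logistic regression's
# regParam with 3-fold cross-validation; the `grid` and `cv` names are illustrative.
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
grid = ParamGridBuilder().addGrid(lr.regParam, [0.01, 0.1, 1.0]).build()
cv = CrossValidator(estimator=Pipeline(stages=[va, lr]), estimatorParamMaps=grid,
                    evaluator=bce, numFolds=3)
cv_model = cv.fit(training_df)
max(cv_model.avgMetrics)  # best held-out AUC across the grid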
lr_model = lr_pipeline.stages[-1]
import pandas as pd
pd.DataFrame(list(zip(credit_score_df.columns[2:], lr_model.coefficients.toArray())),
columns = ['column', 'weight']).sort_values('weight')
rf_model = rf_pipeline.stages[-1]
pd.DataFrame(list(zip(credit_score_df.columns[2:], rf_model.featureImportances.toArray())),
columns = ['column', 'weight']).sort_values('weight')
len(rf_model.trees)
print(rf_model.trees[0].toDebugString)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The problem with this function is that no linear function can correctly classify the data; it is not linearly separable.
Step2: Types of decision trees
Step3: The variance seems to be a tenth of the original variance, which is exactly right.
Step4: With 90% correlation, the crowd doesn't do much better than individuals! In general, the variance of the average of B opinions with pairwise correlation rho is rho*sigma^2 + (1 - rho)*sigma^2/B, so it never falls below rho*sigma^2 no matter how large B grows.
Step5: Random forest
Step6: | Variable Name | Description | Type |
Step7: Without much changes, Random Forest performs much better!
Step8: Random forest
Step9: Let's take a look at one tree
|
3,742
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
tf.enable_eager_execution()
tfe = tf.contrib.eager
# Creating variables
v = tfe.Variable(1.0)
v
v.assign_add(1.0)
v
# In the tf.keras.layers package, layers are objects. To construct a layer,
# simply construct the object. Most layers take as a first argument the number
# of output dimensions / channels.
layer = tf.keras.layers.Dense(100)
# The number of input dimensions is often unnecessary, as it can be inferred
# the first time the layer is used, but it can be provided if you want to
# specify it manually, which is useful in some complex models.
layer = tf.keras.layers.Dense(10, input_shape=(None, 5))
layer(tf.zeros([2, 2]))
layer.variables
class ResnetIdentityBlock(tf.keras.Model):
def __init__(self, kernel_size, filters):
super(ResnetIdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
self.bn2a = tf.keras.layers.BatchNormalization()
self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
self.bn2b = tf.keras.layers.BatchNormalization()
self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
self.bn2c = tf.keras.layers.BatchNormalization()
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
block = ResnetIdentityBlock(1, [1, 2, 3])
print(block(tf.zeros([1, 2, 3, 3])))
print([x.name for x in block.variables])
my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(2, 1,
padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(3, (1, 1)),
tf.keras.layers.BatchNormalization()])
my_seq(tf.zeros([1, 2, 3, 3]))
# TODO: Implement a convolutional model as described above, and assign it to
# model.
model = tf.keras.Sequential([
])
#@title Click to see the answer
max_pool = tf.keras.layers.MaxPooling2D(
(2, 2), (2, 2), padding='same')
# The model consists of a sequential chain of layers, so tf.keras.Sequential
# (a subclass of tf.keras.Model) makes for a compact description.
model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(
32,
5,
padding='same',
activation=tf.nn.relu),
max_pool,
tf.keras.layers.Conv2D(
64,
5,
padding='same',
activation=tf.nn.relu),
max_pool,
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Dense(10)
])
model(tf.zeros([1, 28, 28, 1]))
#@title Utilities for downloading MNIST data (double-click to show code)
import gzip
import os
import tempfile
from six.moves import urllib
import shutil
import numpy as np
def read32(bytestream):
Read 4 bytes from bytestream as an unsigned 32-bit integer.
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def check_image_file_header(filename):
Validate that filename corresponds to images for the MNIST dataset.
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
Validate that filename corresponds to labels for the MNIST dataset.
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
  """Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
  """Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [28, 28, 1])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
def get_training_data(directory):
  """tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte').take(1024)
def get_test_data(directory):
  """tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
# Don't forget to run the cell above!
training_data = get_training_data("/tmp/mnist/train")
test_data = get_test_data("/tmp/mnist/test")
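# Illustrative sanity check (assumes eager execution, as in this notebook):
# one parsed example is a (28, 28, 1) float image in [0, 1] with a scalar label.
for image, label in training_data.take(1):
    print(image.shape, float(tf.reduce_max(image)), int(label))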
EPOCHS = 5
optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.5)
def loss_fn(logits, labels):
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.squeeze(logits), labels=labels))
def train_one_epoch(model, training_data, optimizer):
# TODO: Implement an optimization step and return the average loss.
#
# Hint: Use `tf.GradientTape` to compute the gradient of the loss, and use
# `optimizer.apply_gradients` to update the model's variables, which are
# accessible as `model.variables`
average_loss = tfe.metrics.Mean('loss')
for images, labels in training_data.shuffle(buffer_size=10000).batch(64):
pass
return average_loss.result()
for epoch in range(EPOCHS):
loss = train_one_epoch(model, training_data, optimizer)
print("Average loss after epoch %d: %.4f" % (epoch, loss))
#@title Double-click to see a solution.
EPOCHS = 5
optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.5)
def _loss_fn(logits, labels):
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.squeeze(logits), labels=labels))
def _train_one_epoch(model, training_data):
average_loss = tfe.metrics.Mean("loss")
for images, labels in training_data.shuffle(buffer_size=10000).batch(64):
with tf.GradientTape() as tape:
logits = model(images, training=True)
loss = _loss_fn(logits, labels)
average_loss(loss)
gradients = tape.gradient(loss, model.variables)
optimizer.apply_gradients(zip(gradients, model.variables))
return average_loss.result()
for epoch in range(EPOCHS):
loss = _train_one_epoch(model, training_data)
print("Average loss after epoch %d: %.4f" % (epoch, loss))
import matplotlib.pyplot as plt
sampled_data = test_data.batch(1).shuffle(buffer_size=10000).take(5)
for image, label in sampled_data:
plt.figure()
plt.imshow(tf.reshape(image, (28, 28)))
plt.show()
logits = model(image, training=False)
prediction = tf.argmax(logits, axis=1, output_type=tf.int64)
print("Prediction: %d" % prediction)
import cProfile
import pstats
cProfile.run("train_one_epoch(model, training_data, optimizer)", "training_profile")
stats = pstats.Stats("training_profile").strip_dirs().sort_stats("tottime")
stats.print_stats(10)
stats.sort_stats("cumtime").print_stats(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Variables
Step2: Layers
Step3: The full list of pre-existing layers can be seen in the documentation. It includes Dense (a fully-connected layer),
Step4: Much of the time, however, models which compose many layers simply call one layer after the other. This can be done in very little code using tf.keras.Sequential
Step5: Exercise!
Step13: Stop here for now
Step14: Fill in the implementation of train_one_epoch below and run the cell to train your model.
Step15: Run the below cell to qualitatively evaluate your model. Note how eager execution interoperates seamlessly with matplotlib.
Step16: Profiling
|
3,743
|
<ASSISTANT_TASK:>
Python Code:
!pip install thinc syntok "ml_datasets>=0.2.0" tqdm
from syntok.tokenizer import Tokenizer
def tokenize_texts(texts):
tok = Tokenizer()
return [[token.value for token in tok.tokenize(text)] for text in texts]
import ml_datasets
import numpy
def load_data():
train_data, dev_data = ml_datasets.dbpedia(train_limit=2000, dev_limit=2000)
train_texts, train_cats = zip(*train_data)
dev_texts, dev_cats = zip(*dev_data)
unique_cats = list(numpy.unique(numpy.concatenate((train_cats, dev_cats))))
nr_class = len(unique_cats)
print(f"{len(train_data)} training / {len(dev_data)} dev\n{nr_class} classes")
train_y = numpy.zeros((len(train_cats), nr_class), dtype="f")
for i, cat in enumerate(train_cats):
train_y[i][unique_cats.index(cat)] = 1
dev_y = numpy.zeros((len(dev_cats), nr_class), dtype="f")
for i, cat in enumerate(dev_cats):
dev_y[i][unique_cats.index(cat)] = 1
train_tokenized = tokenize_texts(train_texts)
dev_tokenized = tokenize_texts(dev_texts)
# Generate simple vocab mapping, <unk> is 0
vocab = {}
count_id = 1
for text in train_tokenized:
for token in text:
if token not in vocab:
vocab[token] = count_id
count_id += 1
# Map texts using vocab
train_X = []
for text in train_tokenized:
train_X.append(numpy.array([vocab.get(t, 0) for t in text]))
dev_X = []
for text in dev_tokenized:
dev_X.append(numpy.array([vocab.get(t, 0) for t in text]))
return (train_X, train_y), (dev_X, dev_y), vocab
from typing import List
import thinc
from thinc.api import Model, chain, list2ragged, with_array, reduce_mean, Softmax
from thinc.types import Array2d
@thinc.registry.layers("EmbedPoolTextcat.v1")
def EmbedPoolTextcat(embed: Model[Array2d, Array2d]) -> Model[List[Array2d], Array2d]:
with Model.define_operators({">>": chain}):
model = with_array(embed) >> list2ragged() >> reduce_mean() >> Softmax()
model.set_ref("embed", embed)
return model
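# A minimal sketch (assuming thinc's built-in Embed layer): the same model
# can also be built directly in Python, without the config system used below.
from thinc.api import Embed
direct_model = EmbedPoolTextcat(Embed(nO=64))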
CONFIG = """
[hyper_params]
width = 64

[model]
@layers = "EmbedPoolTextcat.v1"

[model.embed]
@layers = "Embed.v1"
nO = ${hyper_params:width}

[optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001

[training]
batch_size = 8
n_iter = 10
"""
from thinc.api import registry, Config
C = registry.resolve(Config().from_str(CONFIG))
C
(train_X, train_y), (dev_X, dev_y), vocab = load_data()
batch_size = C["training"]["batch_size"]
optimizer = C["optimizer"]
model = C["model"]
model.get_ref("embed").set_dim("nV", len(vocab) + 1)
model.initialize(X=train_X, Y=train_y)
def evaluate_model(model, dev_X, dev_Y, batch_size):
correct = 0.0
total = 0.0
for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):
Yh = model.predict(X)
for j in range(len(Yh)):
correct += Yh[j].argmax(axis=0) == Y[j].argmax(axis=0)
total += len(Y)
return float(correct / total)
from thinc.api import fix_random_seed
from tqdm.notebook import tqdm
fix_random_seed(0)
for n in range(C["training"]["n_iter"]):
loss = 0.0
batches = model.ops.multibatch(batch_size, train_X, train_y, shuffle=True)
for X, Y in tqdm(batches, leave=False):
Yh, backprop = model.begin_update(X)
d_loss = []
for i in range(len(Yh)):
d_loss.append(Yh[i] - Y[i])
loss += ((Yh[i] - Y[i]) ** 2).sum()
backprop(numpy.array(d_loss))
model.finish_update(optimizer)
score = evaluate_model(model, dev_X, dev_y, batch_size)
print(f"{n}\t{loss:.2f}\t{score:.3f}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For simple and standalone tokenization, we'll use the syntok package and the following function
Step2: Setting up the data
Step3: Defining the model and config
Step5: The config defines the top-level model using the registered EmbedPoolTextcat function, and the embed argument, referencing the Embed layer.
Step6: Training setup
Step7: Once the data is loaded, we'll know the vocabulary size and can set the dimension on the embedding layer. model.get_ref("embed") returns the layer defined as the ref "embed" and the set_dim method lets you set a value for a dimension. To fill in the other missing shapes, we can call model.initialize with some input and output data.
Step8: Training the model
|
3,744
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import display, HTML
display(HTML('My first ipython HTML <b>bold</b> tag'))
display(HTML('My first ipython HTML <em>italics</em> tag'))
display(HTML('My first ipython HTML <em>italics tag'))
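# Debug: the previous cell leaves the <em> tag unclosed; the browser may still
# render it, but the corrected markup closes the tag explicitly.
display(HTML('My first ipython HTML <em>italics</em> tag (fixed)'))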
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 2
Step2: Debug 1
|
3,745
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
K_5=nx.complete_graph(5)
nx.draw(K_5)
def complete_deg(n):
    """Return the integer valued degree matrix D for the complete graph K_n."""
    return np.diag((n - 1) * np.ones(n, dtype=int))
print(complete_deg(5))
D = complete_deg(5)
assert D.shape==(5,5)
assert D.dtype==np.dtype(int)
assert np.all(D.diagonal()==4*np.ones(5))
assert np.all(D-np.diag(D.diagonal())==np.zeros((5,5),dtype=int))
def complete_adj(n):
    """Return the integer valued adjacency matrix A for the complete graph K_n."""
    return np.ones((n, n), dtype=int) - np.eye(n, dtype=int)
A = complete_adj(5)
assert A.shape==(5,5)
assert A.dtype==np.dtype(int)
assert np.all(A+np.eye(5,dtype=int)==np.ones((5,5),dtype=int))
# Explore the Laplacian spectrum of K_n for several n: it is always
# 0 (once) together with n repeated n-1 times.
for n in range(2, 8):
    L = complete_deg(n) - complete_adj(n)
    print(n, np.linalg.eigvalsh(L))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Complete graph Laplacian
Step2: The Laplacian Matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$. Where $D$ is the degree matrix and $A$ is the adjecency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
Step4: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
Step5: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$.
|
3,746
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc, patches
# Increase font size, set CM as default text, and use LaTeX
rc('font', **{'size': 16, 'family': 'serif', 'serif': ['Computer Modern Roman']})
rc('text', usetex=True)
# Define colours (taken from http://colorbrewer2.org)
clr = ['#377eb8', '#e41a1c', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628']
%load -s circle adashof.py
def circle(xy, radius, kwargs=None):
    """Create circle on figure with axes of different sizes.

    Plots a circle on the current axes using `plt.Circle`, taking into account
    the figure size and the axes units.

    It is done by plotting in the figure coordinate system, taking the aspect
    ratio into account. In this way, the data dimensions do not matter.
    However, if you adjust `xlim` or `ylim` after plotting `circle`, it will
    screw them up; set `plt.axis` before calling `circle`.

    Parameters
    ----------
    xy, radius, kwargs :
        As required for `plt.Circle`.
    """
# Get current figure and axis
fig = plt.gcf()
ax = fig.gca()
# Calculate figure dimension ratio width/height
pr = fig.get_figwidth()/fig.get_figheight()
# Get the transScale (important if one of the axis is in log-scale)
tscale = ax.transScale + (ax.transLimits + ax.transAxes)
ctscale = tscale.transform_point(xy)
cfig = fig.transFigure.inverted().transform(ctscale)
# Create circle
if kwargs == None:
circ = patches.Ellipse(cfig, radius, radius*pr,
transform=fig.transFigure)
else:
circ = patches.Ellipse(cfig, radius, radius*pr,
transform=fig.transFigure, **kwargs)
# Draw circle
ax.add_artist(circ)
# Generate some data to plot
x = np.arange(101)/100*2*np.pi
y = np.sin(x)
# Circle centres
cxy = (np.arange(5)*np.pi/2, np.sin(np.arange(5)*np.pi/2))
## 1.a Using plt.Circle on equal axes
# Create figure
fig1a = plt.figure()
# Set axis to equal
plt.axis('equal')
# Plot data and set limits
plt.plot(x, y, '-', c=clr[6], lw=2)
plt.xlim([min(x), max(x)])
# Draw circles with plt.Circle
# (`clip_on: False` ensures that the circles are not cut-off at fig-border.)
for i in range(5):
circ = plt.Circle((cxy[0][i], cxy[1][i]), .25, **{'color':clr[i], 'clip_on': False})
plt.gca().add_artist(circ)
# Set labels
plt.title('1.a plt.Circle with equal axes')
plt.text(1, -1.5, r'$y = \rm{sin}(x)$', fontsize=20)
plt.xlabel('x')
plt.ylabel('y')
# Multiply y-values by 5, to make the effect of unequal axes more apparent
y *= 5
cxy = (np.arange(5)*np.pi/2, 5*np.sin(np.arange(5)*np.pi/2))
## 1.b Using plt.Circle on unequal axes
# Create figure
fig1b = plt.figure()
# Plot data and set limits
plt.plot(x, y, '-', c=clr[6], lw=2)
plt.axis([min(x), max(x), 1.2*min(y), 1.2*max(y)])
# Draw circles with plt.Circle
for i in range(5):
circ = plt.Circle((cxy[0][i], cxy[1][i]), .25, **{'color':clr[i], 'clip_on': False})
plt.gca().add_artist(circ)
# Set labels
plt.title('1.b plt.Circle with unequal axes')
plt.text(1, -5, r'$y = 5\times\rm{sin}(x)$', fontsize=20)
plt.xlabel('x')
plt.ylabel('y')
## 1.c Using patches.Ellipse on unequal axes
# Create figure
fig1c = plt.figure()
# Plot data and set limits
plt.plot(x, y, '-', c=clr[6], lw=2)
plt.axis([min(x), max(x), 1.2*min(y), 1.2*max(y)])
# Calculate width and height of Ellipse to create an apparent circle
factor = fig1c.get_figwidth()*(max(1.2*y)-min(1.2*y))/fig1c.get_figheight()/(max(x)-min(x))
# Draw circles with patches.Ellipse
for i in range(5):
circ = patches.Ellipse((cxy[0][i], cxy[1][i]), .5, .5*factor,
**{'color':clr[i], 'clip_on': False})
plt.gca().add_artist(circ)
# Set labels
plt.title('1.c patches.Ellipse with unequal axes')
plt.text(1, -5, r'$y = 5\times\rm{sin}(x)$', fontsize=20)
plt.xlabel('x')
plt.ylabel('y')
## 1.d Using circle on unequal axes
# Create figure
fig1d = plt.figure()
# Plot data and set limits (before plotting the circles!)
plt.plot(x, y, '-', c=clr[6], lw=2)
plt.axis([min(x), max(x), 1.2*min(y), 1.2*max(y)])
# Draw circles with circle
for i in range(5):
circle((cxy[0][i], cxy[1][i]), .06, {'color':clr[i], 'clip_on': False})
# Set labels
plt.title('1.d `circle` with unequal axes')
plt.text(1, -5, r'$y = 5\times\rm{sin}(x)$', fontsize=20)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
# Generate some data to plot
lx = np.arange(1, 102)
ly = lx**2
# Circle centres
lcxy = (np.arange(3)*50+1, (np.arange(3)*50+1)**2)
## 2.a Semilog
# Create figure
fig2a = plt.figure()
# Plot data and set limits
plt.semilogy(lx, ly, '-', c=clr[6], lw=2)
plt.xlim([min(lx), max(lx)])
# Plot circles
for i in range(3):
circle((lcxy[0][i], lcxy[1][i]), 0.07, {'color':clr[i], 'clip_on': False})
# Set labels
plt.title('2.a `circle` with semilog-axes')
plt.text(60, 10, r'$y = x^2$', fontsize=20)
plt.xlabel('x')
plt.ylabel('y')
## 2.b Loglog
# Create figure
fig2a = plt.figure()
# Plot data and set limits
plt.loglog(lx, ly, '-', c=clr[6], lw=2)
plt.xlim([min(lx), max(lx)])
# Plot circles
for i in range(3):
circle((lcxy[0][i], lcxy[1][i]), .07, {'color':clr[i], 'clip_on': False})
# Set labels
plt.title('2.b `circle` with loglog-axes')
plt.text(20, 10, r'$y = x^2$', fontsize=20)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Load the circle-function
Step3: Linear example
Step4: Semilog and loglog example
|
3,747
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu")(inputs)
x2 = layers.Dense(64, activation="relu")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the training dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
# Open a GradientTape to record the operations run
# during the forward pass, which enables auto-differentiation.
with tf.GradientTape() as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = model(x_batch_train, training=True) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss_value = loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = tape.gradient(loss_value, model.trainable_weights)
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %s samples" % ((step + 1) * batch_size))
# Get model
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
import time
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train, training=True)
loss_value = loss_fn(y_batch_train, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# Update training metric.
train_acc_metric.update_state(y_batch_train, logits)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * batch_size))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
@tf.function
def test_step(x, y):
val_logits = model(x, training=False)
val_acc_metric.update_state(y, val_logits)
import time
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
loss_value = train_step(x_batch_train, y_batch_train)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * batch_size))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
test_step(x_batch_val, y_batch_val)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * tf.reduce_sum(inputs))
return inputs
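# Illustrative check: calling the layer once records the regularization term
# in `layer.losses` (here 1e-2 * sum(ones((2, 4))) = 0.08).
reg_layer = ActivityRegularizationLayer()
_ = reg_layer(tf.ones((2, 4)))
print(reg_layer.losses)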
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
# Add any extra losses created during the forward pass.
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
discriminator.summary()
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# We want to generate 128 coefficients to reshape into a 7x7x128 map
layers.Dense(7 * 7 * 128),
layers.LeakyReLU(alpha=0.2),
layers.Reshape((7, 7, 128)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
# Instantiate one optimizer for the discriminator and another for the generator.
d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)
g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def train_step(real_images):
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Decode them to fake images
generated_images = generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(labels.shape)
# Train the discriminator
with tf.GradientTape() as tape:
predictions = discriminator(combined_images)
d_loss = loss_fn(labels, predictions)
grads = tape.gradient(d_loss, discriminator.trainable_weights)
d_optimizer.apply_gradients(zip(grads, discriminator.trainable_weights))
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = discriminator(generator(random_latent_vectors))
g_loss = loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, generator.trainable_weights)
g_optimizer.apply_gradients(zip(grads, generator.trainable_weights))
return d_loss, g_loss, generated_images
import os
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
epochs = 1 # In practice you need at least 20 epochs to generate nice digits.
save_dir = "./"
for epoch in range(epochs):
print("\nStart epoch", epoch)
for step, real_images in enumerate(dataset):
# Train the discriminator & generator on one batch of real images.
d_loss, g_loss, generated_images = train_step(real_images)
# Logging.
if step % 200 == 0:
# Print metrics
print("discriminator loss at step %d: %.2f" % (step, d_loss))
print("adversarial loss at step %d: %.2f" % (step, g_loss))
# Save one generated image
img = tf.keras.preprocessing.image.array_to_img(
generated_images[0] * 255.0, scale=False
)
img.save(os.path.join(save_dir, "generated_img" + str(step) + ".png"))
# To limit execution time we stop after 10 steps.
# Remove the lines below to actually train the model!
if step > 10:
break
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Writing a training loop from scratch
Step2: Introduction
Step3: We train it using mini-batch gradient descent with a custom training loop.
Step4: Here is our training loop:
Step5: Low-level handling of metrics
Step6: Here are our training and evaluation loops:
Step7: Speeding up the training step with tf.function
Step8: We do the same for the evaluation step:
Step9: Now, let's re-run the training loop with the compiled training step:
Step10: Much faster, isn't it?
Step11: Let's build a very simple model that uses it:
Step12: Our training step should now look like this:
Step13: Summary
Step14: Next, we create a generator network that turns latent vectors into outputs of shape (28, 28, 1) (representing MNIST digits):
Step15: Here is the key part: the training loop. As you can see, it is quite straightforward. The training step function is only 17 lines of code.
Step16: We train the GAN by repeatedly calling train_step on batches of images.
|
3,748
|
<ASSISTANT_TASK:>
Python Code:
def fun(x):
return np.exp(-x**2)
integr.quad(fun,0.,1.)
integr.quad(lambda x:np.exp(-x**2),0.,1.)
def fun(x,a):
return x**a
[integr.quad(fun,0.,1.,args=(m,))[0] for m in range(0,5)]
res1 = integr.quad(lambda y:integr.quad(lambda x,y:np.exp(-x**2-y**2)/np.pi,-1,1, args=(y,))[0],-1,1)
res2 = integr.dblquad(lambda x,y:np.exp(-x**2-y**2)/np.pi, -1, 1, lambda x: -1, lambda x: 1)
res3 = integr.nquad(lambda x,y:np.exp(-x**2-y**2)/np.pi, ranges=[[-1.,1.],[-1.,1.]])
print(res1)
print(res2)
print(res3)
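# Improper integrals: quad also accepts infinite limits. For example, the
# Gaussian integral over [0, inf) equals sqrt(pi)/2 (about 0.8862).
integr.quad(lambda x: np.exp(-x**2), 0., np.inf)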
# Integrate ODEs
# First-order ODE
# Solve the equation y' = -alpha*y with decay factor alpha=1.
res = integr.solve_ivp(lambda t,y: -1.*y, np.array([0.,6.]),np.array([1.]))
plt.plot(res.t,np.exp(-res.t),label="exact, alpha=1",c="red")
plt.scatter(res.t,res.y[0],label="numerical, alpha=1",c="red")
# Solve the equation y' = -alpha*y with decay factor alpha=2.
res = integr.solve_ivp(lambda t,y: -2.*y, np.array([0.,6.]),np.array([1.]))
plt.plot(res.t,np.exp(-2*res.t),label="exact, alpha=2",c="blue")
plt.scatter(res.t,res.y[0],label="numerical, alpha=2",c="blue")
plt.legend()
plt.show()
# Second-order ODE
# Solve the equation y'' = -w^2*y with w^2=1.
res = integr.solve_ivp(lambda t,y: [y[1],-y[0]], np.array([0.,12.]),np.array([1.,0.]))
plt.plot(res.t,np.cos(res.t),label="exact, w^2=1",c="red")
plt.scatter(res.t,res.y[0],label="numerical, w^2=1",c="red")
# Solve the equation y'' = -w^2*y with w^2=2.
res = integr.solve_ivp(lambda t,y: [y[1],-2.*y[0]], np.array([0.,12.]),np.array([1.,0.]))
plt.plot(res.t,np.cos(np.sqrt(2)*res.t),label="exact, w^2=2",c="blue")
plt.scatter(res.t,res.y[0],label="numerical, w^2=2",c="blue")
# Solve the equation y'' = -w^2*y with w^2=16.
res = integr.solve_ivp(lambda t,y: [y[1],-16.*y[0]], np.array([0.,12.]),np.array([1.,0.]))
plt.plot(res.t,np.cos(np.sqrt(16)*res.t),label="exact, w^2=16",c="green")
plt.scatter(res.t,res.y[0],label="numerical, w^2=16",c="green")
plt.legend()
plt.show()
# To improve the result, evaluate the solution at more points via the t_eval parameter:
# Solve the equation y'' = -w^2*y with w^2=1.
res = integr.solve_ivp(lambda t,y: [y[1],-y[0]], np.array([0.,12.]),np.array([1.,0.]),t_eval=np.linspace(0,12,100))
plt.plot(res.t,np.cos(res.t),label="exact, w^2=1",c="red")
plt.scatter(res.t,res.y[0],label="numerical, w^2=1",c="red")
# Solve the equation y'' = -w^2*y with w^2=2.
res = integr.solve_ivp(lambda t,y: [y[1],-2.*y[0]], np.array([0.,12.]),np.array([1.,0.]),t_eval=np.linspace(0,12,100))
plt.plot(res.t,np.cos(np.sqrt(2)*res.t),label="exact, w^2=2",c="blue")
plt.scatter(res.t,res.y[0],label="numerical, w^2=2",c="blue")
# Solve the equation y'' = -w^2*y with w^2=16.
res = integr.solve_ivp(lambda t,y: [y[1],-16.*y[0]], np.array([0.,12.]),np.array([1.,0.]),t_eval=np.linspace(0,12,100))
plt.plot(res.t,np.cos(np.sqrt(16)*res.t),label="exact, w^2=16",c="green")
plt.scatter(res.t,res.y[0],label="numerical, w^2=16",c="green")
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sometimes it is convenient to define the function to integrate as depending on parameters. For example
Step2: Improper integrals
Step3: Integration of ordinary differential equations (ODEs)
Step4: Integration of second-order ODEs
|
3,749
|
<ASSISTANT_TASK:>
Python Code:
pip install --user apache-beam[gcp]
import apache_beam as beam
print(beam.__version__)
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
# Create SQL query using natality data after the year 2000
query = """
SELECT
  weight_pounds,
  is_male,
  mother_age,
  plurality,
  gestation_weeks,
  FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
  publicdata.samples.natality
WHERE year > 2000
"""
# Call BigQuery and examine in dataframe
from google.cloud import bigquery
df = bigquery.Client().query(query + " LIMIT 100").to_dataframe()
df.head()
import datetime, os
def to_csv(rowdict):
# Pull columns from BQ and create a line
import hashlib
import copy
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks'.split(',')
    # Create synthetic data where we assume that no ultrasound has been performed
    # and so we don't know the sex of the baby. Let's assume that we can tell the
    # difference between single and multiple births, but that determining the
    # exact number is error-prone in the absence of an ultrasound.
no_ultrasound = copy.deepcopy(rowdict)
w_ultrasound = copy.deepcopy(rowdict)
no_ultrasound['is_male'] = 'Unknown'
if rowdict['plurality'] > 1:
no_ultrasound['plurality'] = 'Multiple(2+)'
else:
no_ultrasound['plurality'] = 'Single(1)'
# Change the plurality column to strings
w_ultrasound['plurality'] = ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)'][rowdict['plurality'] - 1]
# Write out two rows for each input row, one with ultrasound and one without
for result in [no_ultrasound, w_ultrasound]:
data = ','.join([str(result[k]) if k in result else 'None' for k in CSV_COLUMNS])
key = hashlib.sha224(data.encode('utf-8')).hexdigest() # hash the columns to form a key
yield str('{},{}'.format(data, key))
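# Illustrative check with a hypothetical sample row (not taken from BigQuery):
# to_csv yields two CSV lines per input row, one with and one without ultrasound.
sample_row = {'weight_pounds': 7.5, 'is_male': 'true', 'mother_age': 29,
              'plurality': 1, 'gestation_weeks': 39, 'hashmonth': 123456}
for line in to_csv(sample_row):
    print(line)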
def preprocess(in_test_mode):
import shutil, os, subprocess
job_name = 'preprocess-babyweight-features' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')
if in_test_mode:
print('Launching local job ... hang on')
OUTPUT_DIR = './preproc'
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
os.makedirs(OUTPUT_DIR)
else:
print('Launching Dataflow job {} ... hang on'.format(job_name))
OUTPUT_DIR = 'gs://{0}/babyweight/preproc/'.format(BUCKET)
try:
subprocess.check_call('gsutil -m rm -r {}'.format(OUTPUT_DIR).split())
except:
pass
options = {
'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
'job_name': job_name,
'project': PROJECT,
'region': REGION,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True,
'num_workers': 4,
'max_num_workers': 5
}
opts = beam.pipeline.PipelineOptions(flags = [], **options)
if in_test_mode:
RUNNER = 'DirectRunner'
else:
RUNNER = 'DataflowRunner'
p = beam.Pipeline(RUNNER, options = opts)
    query = """
    SELECT
      weight_pounds,
      is_male,
      mother_age,
      plurality,
      gestation_weeks,
      FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
    FROM
      publicdata.samples.natality
    WHERE year > 2000
    AND weight_pounds > 0
    AND mother_age > 0
    AND plurality > 0
    AND gestation_weeks > 0
    AND month > 0
    """
if in_test_mode:
query = query + ' LIMIT 100'
for step in ['train', 'eval']:
if step == 'train':
selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) < 3'.format(query)
else:
selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) = 3'.format(query)
(p
| '{}_read'.format(step) >> beam.io.Read(beam.io.BigQuerySource(query = selquery, use_standard_sql = True))
| '{}_csv'.format(step) >> beam.FlatMap(to_csv)
| '{}_out'.format(step) >> beam.io.Write(beam.io.WriteToText(os.path.join(OUTPUT_DIR, '{}.csv'.format(step))))
)
job = p.run()
if in_test_mode:
job.wait_until_finish()
print("Done!")
preprocess(in_test_mode = False)
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the command again if you are getting oauth2client error.
Step2: You may receive a UserWarning saying that the Apache Beam SDK for Python 3 is not yet fully supported. Don't worry about this.
Step4: <h2> Save the query from earlier </h2>
Step6: <h2> Create ML dataset using Dataflow </h2>
Step7: The above step will take 20+ minutes. Go to the GCP web console, navigate to the Dataflow section and <b>wait for the job to finish</b> before you run the following step.
|
3,750
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (10, 6)
pop = pd.read_csv('data/cars_small.csv')
pop.head()
class_mapping = {'Hatchback': 0, 'Sedan': 1}
pop['types'] = pop['type'].map(class_mapping)
plt.scatter(pop.price, pop.types, c=pop.types, s = 150, alpha = 0.8 )
plt.xlabel('price')
plt.ylabel('types')
def ols (df, xlabel, ylabel):
n = df.shape[0]
x0 = np.ones(n)
x1 = df[xlabel]
X = np.c_[x0, x1]
X = np.asmatrix(X)
y = np.transpose(np.asmatrix(df[ylabel]))
X_T = np.transpose(X)
X_pseudo = np.linalg.inv(X_T * X) * X_T
beta = X_pseudo * y
return beta
def plot_ols(df, xlabel, ylabel):
    beta = ols(df, xlabel, ylabel)
beta_0 = beta.item(0)
beta_1 = beta.item(1)
plt.scatter(df[xlabel], df[ylabel], c=df[ylabel], s = 150, alpha = 0.8 )
plt.xlabel(xlabel)
plt.ylabel(ylabel)
y = beta_0 + beta_1 * df[xlabel]
plt.plot(df[xlabel], y, '-')
cutoff = (0.5 - beta_0)/beta_1
plt.vlines(cutoff, -0.4, 1.4)
ols(pop, 'price', 'types')
plot_ols(pop, 'price', 'types')
pop1 = pop.copy()
pop1.tail()
# Lets create an outlier
pop1.loc[37,'price'] = 1500
pop1.loc[41,'price'] = 2000
plot_ols(pop1, 'price', 'types')
z = np.linspace(-10, 10, 100)
p = 1/(1+np.exp(-z))
plt.plot(z,p)
plt.hlines(0.5, -20,20)
plt.vlines(0, 0,1)
plt.xlabel('z')
plt.ylabel('P(z)')
plt.scatter(pop['kmpl'], pop['price'], c=pop['types'], s = 150, alpha = 0.8 )
plt.xlabel('kmpl')
plt.ylabel('price')
z = np.linspace(0.001, 0.999, 1000)
c1 = -np.log(z)
c2 = -np.log(1-z)
plt.plot(z,c1)
plt.plot(z,c2)
#plt.hlines(0.5, -10,10)
#plt.vlines(0, 0,1)
plt.xlabel('z')
plt.ylabel('Cost')
n = pop.shape[0]
x0 = np.ones(n)
x1 = pop.kmpl
x2 = pop.price
X_actual = np.c_[x1, x2]
X_norm = (X_actual - np.mean(X_actual, axis=0)) / np.std(X_actual, axis=0)
X = np.c_[x0, X_norm]
X = np.asmatrix(X)
y = np.asmatrix(pop.types.values.reshape(-1,1))
b = np.asmatrix([[0],[0],[0]])
def P(z):
return 1.0/(1+np.exp(-z))
def cost(X,y,b,n):
C = (- y.T*np.log(P(X*b))-(1-y.T)*np.log(1-P(X*b)))/n
return C[0,0]
def gradient(X,y,b,n):
    # Gradient of the mean cross-entropy cost: (1/n) * X^T (P(Xb) - y)
    g = (1.0/n)*X.T*(P(X*b) - y)
    return g
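# A quick finite-difference check of the analytic gradient (illustrative):
# each component should match (cost(b+eps) - cost(b-eps)) / (2*eps).
eps = 1e-6
b_test = np.asmatrix([[0.1], [0.2], [-0.3]])
num_g = np.zeros((3, 1))
for j in range(3):
    db = np.zeros((3, 1)); db[j] = eps
    num_g[j] = (cost(X, y, b_test + db, n) - cost(X, y, b_test - db, n)) / (2 * eps)
print(np.hstack([num_g, gradient(X, y, b_test, n)]))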
def gradient_descent_logistic (eta, epochs, X, y, n):
# Set Initial Values
b = np.asmatrix([[0],[0],[0]])
c = cost(X,y,b,n)
c_all = []
c_all.append(c)
# Run the calculation for those many epochs
for i in range(epochs):
g = gradient(X,y,b,n)
b = b - eta * g
c = cost(X,y,b,n)
c_all.append(c)
return c_all, b
from __future__ import division
x1_min, x1_max = -3, 3
x2_min, x2_max = -3, 3
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, (x1_max - x1_min)/100),
np.arange(x2_min, x2_max, (x2_max - x2_min)/100))
xx = np.c_[np.ones(xx1.ravel().shape[0]), xx1.ravel(), xx2.ravel()]
def plot_gradient_descent(eta, epoch, gradient_func):
es, bs = gradient_func(eta, epoch, X, y, n)
# Plot the intercept and coefficients
plt.subplot(1, 2, 1)
#plt.tight_layout()
# Plot the probabilty plot contour
Z = P(xx*bs)
Z = Z.reshape(xx1.shape)
cs = plt.contourf(xx1, xx2, Z, cmap=plt.cm.viridis, alpha = 0.5)
plt.colorbar(cs)
# Plot the intercept and coefficients
plt.scatter(X[:,1], X[:,2], c=pop.types, s = 150, alpha = 0.8 )
plt.xlabel('kmpl')
plt.ylabel('price')
# Plot the error rates
plt.subplot(1, 2, 2)
plt.plot(es)
plt.xlabel('Epochs')
plt.ylabel('Error')
plot_gradient_descent(0.05, 1000, gradient_descent_logistic)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lets say we want to classify the vehicles by 'Hatchback' and 'Sedan'
Step2: Why Linear Function does not work
Step3: However, there are two problems with this approach
Step4: Logistic Function
Step5: So now we can transpose our linear regression problem with this logit function
Step6: Cost Function
Step7: Gradient Descent for Logistic Function
|
3,751
|
<ASSISTANT_TASK:>
Python Code:
#
# this function has 3 optional arguments
#
def optional_args(a=None, b='one', c=3):
print('a={}, b={}, c={}'.format(a,b,c))
optional_args()
#prints
#a=None, b=one, c=3
#
# we can also pass the arguments via a dictionary
# so we can save them/modify them
#
arg_dict=dict(a=4,b=[1,2,3],c='hello')
optional_args(**arg_dict)
#
# prints
# a=4, b=[1, 2, 3], c=hello
#
def optional_args(**kwargs):
print('got these args: {}'.format(kwargs))
optional_args(bacon=True,fab_4=(1,2,3),bozo='5')
def required_args(a,b,c,test_opt=5):
print('required: a -- {}, b -- {}, c -- {}'.format(a,b,c))
print('optional arg {}'.format(test_opt))
required_args(1,2,3)
#prints:
#required: a -- 1, b -- 2, c -- 3
#optional arg 5
arg_list=[5,'two',[3,1,7]]
required_args(*arg_list,test_opt=10)
#
# prints:
# required: a -- 5, b -- two, c -- [3, 1, 7]
# optional arg 10
#
def show_expansion(*args, **kwargs):
print('required arguments are: {}'.format(args))
print('optional arguments are: {}'.format(kwargs))
show_expansion(1,2,[4,5],boys='pink',fish='shiny')
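# A common use of * and ** expansion (illustrative): a wrapper that forwards
# whatever it receives to the wrapped function unchanged.
def logged(func):
    def wrapper(*args, **kwargs):
        print('calling {} with args={}, kwargs={}'.format(func.__name__, args, kwargs))
        return func(*args, **kwargs)
    return wrapper
logged(required_args)(1, 2, 3, test_opt=7)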
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Writing a function to take unknown optional arguments
Step2: Required arguments
Step3: Writing a function to take an unknown number of required arguments and optional arguments
|
3,752
|
<ASSISTANT_TASK:>
Python Code:
import iris
fname = iris.sample_data_path('air_temp.pp')
cubes = iris.load(fname)
print(type(cubes))
print(cubes)
cube = iris.load_cube(fname)
print(type(cube))
print(cube)
cubes[0] == cube
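# A minimal sketch: load_cube also accepts a constraint (here a phenomenon
# name) to pick a single cube out of a multi-cube file.
air_temp = iris.load_cube(fname, 'air_temperature')
print(air_temp.name())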
#
# edit space for user code ...
#
fname = iris.sample_data_path('uk_hires.pp')
cubes = iris.load(fname)
iris.save(cubes, 'saved_cubes.nc')
!ncdump -h saved_cubes.nc | head -n 20
!rm saved_cubes.nc
# space for user code ...
# SAMPLE SOLUTION
# %load solutions/iris_exercise_2.3a
# space for user code ...
# SAMPLE SOLUTION
# %load solutions/iris_exercise_2.3b
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.1 Iris Load Functions<a id='iris_load_functions'></a>
Step2: If we give this filepath to load, we see that Iris returns a cubelist.
Step3: A CubeList is a specialised version of a Python list object
Step4: If we compare the first cube in the cubelist returned by calling load and the cube returned by calling load_cube we see that they are equal.
Step5: Exercise
Step6: 2.2 Saving Cubes<a id='saving'></a>
Step7: We can check the ncdump to see what Iris saved
Step8: Extra keywords can be passed to specific fileformat savers.
Step9: 2. Go to the Iris reference documentation for iris.save. What keywords are accepted to iris.save when saving a PP file?
|
3,753
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tf-nightly --upgrade
!pip install jax --upgrade
!pip install jaxlib --upgrade
import numpy as np
import tensorflow as tf
import functools
import time
import itertools
import numpy.random as npr
import jax.numpy as jnp
from jax import jit, grad, random
from jax.experimental import optimizers
from jax.experimental import stax
def _one_hot(x, k, dtype=np.float32):
  """Create a one-hot encoding of x of size k."""
return np.array(x[:, None] == np.arange(k), dtype)
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
train_images = train_images.astype(np.float32)
test_images = test_images.astype(np.float32)
train_labels = _one_hot(train_labels, 10)
test_labels = _one_hot(test_labels, 10)
def loss(params, batch):
inputs, targets = batch
preds = predict(params, inputs)
return -jnp.mean(jnp.sum(preds * targets, axis=1))
def accuracy(params, batch):
inputs, targets = batch
target_class = jnp.argmax(targets, axis=1)
predicted_class = jnp.argmax(predict(params, inputs), axis=1)
return jnp.mean(predicted_class == target_class)
init_random_params, predict = stax.serial(
stax.Flatten,
stax.Dense(1024), stax.Relu,
stax.Dense(1024), stax.Relu,
stax.Dense(10), stax.LogSoftmax)
rng = random.PRNGKey(0)
step_size = 0.001
num_epochs = 10
batch_size = 128
momentum_mass = 0.9
num_train = train_images.shape[0]
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
def data_stream():
rng = npr.RandomState(0)
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
yield train_images[batch_idx], train_labels[batch_idx]
batches = data_stream()
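# Illustrative shape check with a fresh stream (so the training stream above
# is left untouched): each batch pairs images with one-hot labels.
xb, yb = next(data_stream())
print(xb.shape, yb.shape)  # expect (128, 28, 28) and (128, 10)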
opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=momentum_mass)
@jit
def update(i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(loss)(params, batch), opt_state)
_, init_params = init_random_params(rng, (-1, 28 * 28))
opt_state = opt_init(init_params)
itercount = itertools.count()
print("\nStarting training...")
for epoch in range(num_epochs):
start_time = time.time()
for _ in range(num_batches):
opt_state = update(next(itercount), opt_state, next(batches))
epoch_time = time.time() - start_time
params = get_params(opt_state)
train_acc = accuracy(params, (train_images, train_labels))
test_acc = accuracy(params, (test_images, test_labels))
print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time))
print("Training set accuracy {}".format(train_acc))
print("Test set accuracy {}".format(test_acc))
serving_func = functools.partial(predict, params)
x_input = jnp.zeros((1, 28, 28))
converter = tf.lite.TFLiteConverter.experimental_from_jax(
[serving_func], [[('input1', x_input)]])
tflite_model = converter.convert()
with open('jax_mnist.tflite', 'wb') as f:
f.write(tflite_model)
expected = serving_func(train_images[0:1])
# Run the model with TensorFlow Lite
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]["index"], train_images[0:1, :, :])
interpreter.invoke()
result = interpreter.get_tensor(output_details[0]["index"])
# Assert if the result of TFLite model is consistent with the JAX model.
np.testing.assert_almost_equal(expected, result, 1e-5)
def representative_dataset():
for i in range(1000):
x = train_images[i:i+1]
yield [x]
converter = tf.lite.TFLiteConverter.experimental_from_jax(
[serving_func], [[('x', x_input)]])
tflite_model = converter.convert()
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_quant_model = converter.convert()
with open('jax_mnist_quant.tflite', 'wb') as f:
f.write(tflite_quant_model)
expected = serving_func(train_images[0:1])
# Run the model with TensorFlow Lite
interpreter = tf.lite.Interpreter(model_content=tflite_quant_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]["index"], train_images[0:1, :, :])
interpreter.invoke()
result = interpreter.get_tensor(output_details[0]["index"])
# Assert if the result of TFLite model is consistent with the Jax model.
np.testing.assert_almost_equal(expected, result, 1e-5)
!du -h jax_mnist.tflite
!du -h jax_mnist_quant.tflite
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Converting a Jax model to TFLite
Step3: Preparing the data
Step4: Building an MNIST model with Jax
Step5: Training and evaluating the model
Step6: Convert to a TFLite model.
Step7: Verifying the converted TFLite model
Step8: Optimizing the model
Step9: Evaluating the optimized model
Step10: Comparing quantized model sizes
|
3,754
|
<ASSISTANT_TASK:>
Python Code:
%%tikz --scale 2 --size 800,300 -f svg
\tikzset{node distance=2cm,
block/.style={rectangle, draw, minimum height=15mm, minimum width=20mm},
sumnode/.style={circle, draw, inner sep=2pt}
}
\node[coordinate] (input) {};
\node[block, right of=input] (TR) {$F_f(s)=K\frac{b_1}{a_1}$};
\node[sumnode, right of=TR] (sum) {$\sum$};
\node[block,right of=sum, node distance=30mm] (plant) {$G(s)=\frac{k_d}{Js^2}$};
\node[sumnode, right of=plant] (sumdist) {$\sum$};
\node[coordinate, above of=sumdist, node distance=15mm] (dist) {};
\node[coordinate, right of=sumdist, node distance=15mm] (measure) {};
\node[coordinate, right of=measure, node distance=10mm] (output) {};
\node[sumnode,below of=measure, node distance=25mm] (sumnoise) {$\sum$};
\node[coordinate, right of=sumnoise, node distance=15mm] (noise) {};
\node[block,left of=sumnoise, node distance=30mm] (SR) {$F_b(s) = K\frac{s+b_1}{s+a_1}$};
\draw[->] (input) -- node[above, pos=0.2] {$\theta_{ref}(t)$} (TR);
\draw[->] (TR) -- node[above] {} (sum);
\draw[->] (sum) -- node[above] {$u(t)$} (plant);
\draw[->] (plant) -- (sumdist);
\draw[->] (dist) -- node[at start, right] {$v(t)$} (sumdist);
\draw[->] (sumdist) -- node[at end, above] {$\theta(t)$} (output);
\draw[->] (measure) -- (sumnoise);
\draw[->] (noise) -- node[at start, above] {$n$} (sumnoise);
\draw[->] (sumnoise) -- (SR);
\draw[->] (SR) -| (sum) node[left, pos=0.96] {$-$};
s = sy.symbols('s')
a1,b1,kd,K,J = sy.symbols('a_1,b_1,k_d,K,J')
w0 = sy.symbols('omega0')
p1 = -w0
p2 = w0*(-0.5 + sy.I*sy.sqrt(3)/2)
p3 = w0*(-0.5 - sy.I*sy.sqrt(3)/2)
Ac = sy.collect(sy.expand((s-p1)*(s-p2)*(s-p3)), s)
Ac
A = s**2
B = kd/J
R = s+a1
S = K*(s+b1)
lhs = A*R + B*S
lhs
dioph = sy.collect(sy.expand(lhs-Ac), s)
dioph
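# A hedged sketch: matching coefficients of s in the Diophantine equation
# gives three equations that sympy can solve for a1, b1 and K.
sy.solve([dioph.coeff(s, i) for i in range(3)], [a1, b1, K], dict=True)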
w0_n=1.0
J_n = 1.0
kd_n = 1.0
a1_n = 2*w0_n
b1_n = w0_n/2.0
K_n = 2*J_n*w0_n**2/kd_n
Fb = K_n*cm.tf([1, b1_n], [1, a1_n])
mag, phase, omega = cm.bode(Fb)
#plt.figure()
#mag, phase, omega = cm.bode(Fb)
#plt.figure()
#plt.loglog(mag, omega)
K = 2*J*w0**2/kd
a1 = 2*w0
b1 = w0/2
Fb = K*(s + b1)/(s+a1)
G = kd/J / s**2
Go = G*Fb
sy.simplify(sy.expand(Go))
Go_at_w0 = Go.subs(s, sy.I*w0)
sy.simplify(Go_at_w0)
sy.Abs(sy.simplify(Go_at_w0))
sy.arg(sy.simplify(Go_at_w0)).evalf()*180/np.pi
w0_n=1.0
J_n = 1.0
kd_n = 1.0
a1_n = 2*w0_n
b1_n = w0_n/2.0
K_n = 2*J_n*w0_n**2/kd_n
Fb = K_n*cm.tf([1, b1_n], [1, a1_n])
G = kd_n/J_n * cm.tf([1],[1, 0, 0])
plt.figure()
mag, phase, omega = cm.bode(Fb*G)
yout, tt = cm.step(Fb/K_n)
plt.figure()
plt.plot(tt, yout)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model
Step2: Finding the controller parameters
Step3: Bodeplot of the loop gain
|
3,755
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
C = complex(3, 4)  # np.complex was an alias of the builtin complex and was removed in NumPy 1.24
print('C=', C)
print(type(C))
c = np.array([3+4j])
#c = 3+4j
print('c=', c)
print(type(c))
print('Real part:', c.real)
print('Imaginary part:', c.imag)
print(c.shape)
cc = np.conjugate(c)
print('c=', c)
print('Complex conjugate:', cc)
print('Real part:', cc.real)
print('Imaginary part:', cc.imag)
c3 = np.array(2+2j)
print('c3=', c3)
print('Modulus:', np.abs(c3))
print('Argument:', np.rad2deg(np.angle(c3)))
np.angle(c3)*180/np.pi
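# Reconstructing the complex number from its polar form (illustrative):
r, theta = np.abs(c3), np.angle(c3)
print(r * np.exp(1j * theta))  # should reproduce c3 = 2+2j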
C_array = np.array([[3+4j, 2+2j],[1-2j, -2+2j]])
print('C_array=\n', C_array)
print('Modulus:\n', np.abs(C_array))
print('Argument:\n', np.rad2deg(np.angle(C_array)))
print('Shape:',C_array.shape)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: However, since we will work with complex numbers in images, we will use not just one
Step2: The conjugate of this same complex number $c$ is given by
Step3: Representation in the complex plane
Step4: Polar representation
|
3,756
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-1', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
3,757
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError:
print("Seaborn not installed. Oh well.")
import numpy as np
import astropy.io.fits as fits
import sherpa.astro.ui as ui
from clarsach.respond import RMF, ARF
datadir = "../data/athena/"
data_file = "26.pha"
rmf_file = "athena_xifu_rmf_highres_v20150609.rmf"
arf_file = "athena_xifu_sixte_1469_onaxis_v20150402.arf"
hdulist = fits.open(datadir+data_file)
hdulist.info()
s = hdulist["SPECTRUM"]
s.columns
channel = s.data.field("CHANNEL")
counts = s.data.field("COUNTS")
plt.figure(figsize=(10,5))
plt.plot(channel, counts)
arf = ARF(datadir+arf_file)
rmf = RMF(datadir+rmf_file)
resp_model = np.ones_like(counts)
m_arf = arf.apply_arf(resp_model)
m_rmf = rmf.apply_rmf(m_arf)
c_deconv = counts/m_rmf
plt.figure(figsize=(10, 5))
plt.plot(channel, c_deconv)
plt.xscale("log")
ui.load_data("26", datadir+data_file)
d = ui.get_data("26")
arf_s = d.get_arf()
rmf_s = d.get_rmf()
print("ARF: " + str(arf_s))
print("RMF: " + str(rmf_s))
assert np.all(arf_s.specresp == arf.specresp), "Clarsach ARF is different from Sherpa ARF"
ui.set_source("26", ui.polynom1d.truespec)
c_deconv_s = ui.get_ratio_plot("26").y
e_deconv_s = ui.get_ratio_plot("26").x
plt.figure(figsize=(10,5))
plt.plot(e_deconv_s, c_deconv, label="Clarsach Deconvolution")
plt.plot(e_deconv_s, c_deconv_s, label="Sherpa Deconvolution")
plt.legend()
plt.yscale('log')
np.allclose(c_deconv, c_deconv_s)
import astropy.modeling.models as models
from astropy.modeling.fitting import _fitter_to_model_params
from scipy.special import gammaln as scipy_gammaln
pl = models.PowerLaw1D()
pl.x_0.fixed = True
class PoissonLikelihood(object):
def __init__(self, x, y, model, arf=None, rmf=None, bounds=None):
self.x = x
self.y = y
self.model = model
self.arf = arf
self.rmf = rmf
if bounds is None:
bounds = [self.x[0], self.x[-1]]
min_idx = self.x.searchsorted(bounds[0])
max_idx = self.x.searchsorted(bounds[1])
self.idx = [min_idx, max_idx]
def evaluate(self, pars):
# store the new parameters in the model
_fitter_to_model_params(self.model, pars)
# evaluate the model at the positions x
mean_model = self.model(self.x)
# run the ARF and RMF calculations
        if self.arf is not None and self.rmf is not None:
            m_arf = self.arf.apply_arf(mean_model)
            ymodel = self.rmf.apply_rmf(m_arf)
else:
ymodel = mean_model
# cut out the part of the spectrum that's of interest
y = self.y[self.idx[0]:self.idx[1]]
ymodel = ymodel[self.idx[0]:self.idx[1]]
# compute the log-likelihood
loglike = np.sum(-ymodel + y*np.log(ymodel) \
- scipy_gammaln(y + 1.))
if np.isfinite(loglike):
return loglike
else:
return -1.e16
def __call__(self, pars):
l = -self.evaluate(pars)
#print(l)
return l
loglike = PoissonLikelihood(e_deconv_s, counts, pl, arf=arf, rmf=rmf, bounds=[1.0, 6.0])
loglike([1.0, 2.0])
from scipy.optimize import minimize
opt = minimize(loglike, [1.0, 1.0])
opt
_fitter_to_model_params(pl, opt.x)
mean_model = pl(loglike.x)
m_arf = arf.apply_arf(mean_model)
ymodel = rmf.apply_rmf(m_arf)
ymodel_small = ymodel[loglike.idx[0]:loglike.idx[1]]
y_small = loglike.y[loglike.idx[0]:loglike.idx[1]]
e_deconv_small = e_deconv_s[loglike.idx[0]:loglike.idx[1]]
print(np.mean(y_small-ymodel_small))
plt.figure()
plt.plot(e_deconv_small, y_small, label="Data")
plt.plot(e_deconv_small, ymodel_small, label="Model")
plt.legend()
plt.figure(figsize=(10,5))
plt.plot(e_deconv_small, c_deconv[loglike.idx[0]:loglike.idx[1]], label="Data")
plt.plot(e_deconv_small, mean_model[loglike.idx[0]:loglike.idx[1]], label="Best-fit model")
plt.xlim(1.0, 6.0)
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's load some data
Step2: Let's load the data using Clàrsach
Step3: Let's also load the ARF and RMF
Step4: Let's make an empty model to divide out the responses
Step5: This seems to be working not badly. Let's try the same with sherpa
Step6: Do the ARF and RMF exist?
Step7: There's no RMF, because the RMF for Athena does not seem to be a variable length field and is thus not read in. Oh well.
Step8: Looks like this worked. Let's do the deconvolution with sherpa and look at the results
Step9: Well, I didn't actually expect them to be the same, so all right. This might also be due to the fact that I don't understand everything about what ratio_plot is doing; at some point I need to talk to Victoria about that.
Step10: We'll need to fix the x_0 parameter of the power law model to continue
Step11: Let's define a Poisson log-likelihood (the formula is written out after this step list)
Step12: Ok, cool, let's make a PoissonLikelihood object to use
Step13: Let's fit this with a minimization algorithm
Step14: Looks like it has accurately found the photon index of 2. Let's make a best-fit example model and plot the raw spectra
Step15: Let's also plot the deconvolved version for fun
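For reference, the Poisson log-likelihood defined in Step11 is, for observed counts $y_i$ and model counts $m_i(\theta)$,

$$\ln \mathcal{L}(\theta) = \sum_i \left[ -m_i(\theta) + y_i \ln m_i(\theta) - \ln \Gamma(y_i + 1) \right],$$

which is exactly the sum computed in the evaluate method, with scipy's gammaln supplying $\ln \Gamma$.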
|
3,758
|
<ASSISTANT_TASK:>
Python Code:
from horsetailmatching import HorsetailMatching, UniformParameter
from horsetailmatching.demoproblems import TP2
from horsetailmatching.surrogates import PolySurrogate
import numpy as np
uparams = [UniformParameter(), UniformParameter()]
thePoly = PolySurrogate(dimensions=len(uparams), order=4)
u_quadrature = thePoly.getQuadraturePoints()
def myPolynomialChaosSurrogate(u_quad, q_quad):
thePoly.train(q_quad)
return thePoly.predict
theHM = HorsetailMatching(TP2, uparams, surrogate=myPolynomialChaosSurrogate, surrogate_points=u_quadrature)
print('Metric evaluated with polynomial chaos surrogate: ', theHM.evalMetric([0, 1]))
theHM.surrogate = None
print('Metric evaluated with direct sampling: ', theHM.evalMetric([0, 1]))
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan
sp = samplingplan(2)
u_sampling = sp.optimallhc(25)
def myKrigingSurrogate(u_lhc, q_lhc):
krig = kriging(u_lhc, q_lhc)
krig.train()
return krig.predict
theHM.surrogate = myKrigingSurrogate
theHM.surrogate_points = u_sampling
print('Metric evaluated with kriging surrogate: ', theHM.evalMetric([0, 1]))
theHM.surrogate = None
print('Metric evaluated with direct sampling: ', theHM.evalMetric([0, 1]))
from equadratures import Polyreg
U1, U2 = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))
u_tensor = np.vstack([U1.flatten(), U2.flatten()]).T
def myQuadraticSurrogate(u_tensor, q_tensor):
poly = Polyreg(np.mat(u_tensor), np.mat(q_tensor).T, 'quadratic')
def model(u):
return poly.testPolynomial(np.mat(u))
return model
theHM.surrogate = myQuadraticSurrogate
theHM.surrogate_points = u_tensor
print('Metric evaluated with quadratic surrogate: ', theHM.evalMetric([0, 1]))
theHM.surrogate = None
print('Metric evaluated with direct sampling: ', theHM.evalMetric([0, 1]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's start with the built-in polynomial chaos surrogate. This finds the coefficients of a polynomial expansion by evaluating the inner product of the QoI function with each polynomial using Gaussian quadrature (a minimal sketch of that quadrature idea follows this list).
Step2: Next we use the pyKriging samplingplan function to give us 25 points found via Latin hypercube sampling at which to evaluate the metric to create the surrogate. Then we create a function in the form required by horsetail matching called myKrigingSurrogate, and pass this as the surrogate argument when making the horsetail matching object, along with the LHS points as the surrogate_points argument. Here we modify the already created horsetail matching object instead of making a new one.
Step3: Now we do a similar thing with the Effective Quadratures toolbox to make a quadratic polynomial surrogate.
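To make the Gaussian-quadrature idea in Step1 concrete, here is a minimal one-dimensional sketch (an illustration only, assuming a uniform input on [-1, 1] and a Legendre basis; it is not the internals of PolySurrogate): each coefficient is the quadrature estimate of the projection of the quantity of interest onto one basis polynomial.

import numpy as np
from numpy.polynomial.legendre import leggauss, legval

def pc_coefficients(f, order):
    # n Gauss-Legendre nodes integrate polynomials up to degree 2n - 1 exactly
    nodes, weights = leggauss(order + 1)
    fvals = f(nodes)
    coeffs = np.empty(order + 1)
    for i in range(order + 1):
        e_i = np.zeros(order + 1)
        e_i[i] = 1.0
        psi = legval(nodes, e_i)  # i-th Legendre polynomial evaluated at the nodes
        coeffs[i] = np.sum(weights * fvals * psi) / np.sum(weights * psi * psi)
    return coeffs

coeffs = pc_coefficients(np.sin, order=4)
surrogate = lambda u: legval(u, coeffs)  # cheap polynomial stand-in for f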
|
3,759
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
start_col, end_col = 'starttime', 'stoptime'
# Loading just first 10,000 rows
df = pd.read_csv('201501-citibike-tripdata.csv', parse_dates=[start_col, end_col], nrows=10000)
@np.vectorize
def minutes(time):
Convert time to minutes since 00:00
>>> minutes('3:17')
197
# Work on strings as well
if isinstance(time, str):
time = pd.Timestamp(time)
return (time.hour * 60) + time.minute
# Parts of day, we can have overlaps here
parts_of_day = [
('morning', minutes('05:00am'), minutes('11:00')),
('noon', minutes('11:00am'), minutes('14:00')),
('evening', minutes('14:00'), minutes('19:00')),
# Night is two continuous parts
('night_a', minutes('18:30'), minutes('23:59')),
('night_b', minutes('00:00'), minutes('6:00')),
]
# Auxiliary DataFrame
parts_df = pd.DataFrame(index=df.index)
for col in [start_col, end_col]:
parts_df[col] = minutes(df[col].dt.time)
for name, start, end in parts_of_day:
parts_df['start_p'] = start
parts_df['end_p'] = end
ends = parts_df[[end_col, 'end_p']].min(axis=1)
starts = parts_df[[start_col, 'start_p']].max(axis=1)
duration = ends - starts
parts_df.loc[duration >= 0, name] = duration[duration >= 0]
parts_df['night'] = parts_df['night_a'] + parts_df['night_b']
parts_df.fillna(0, inplace=True)
# Assign part of day where most time was spent
df['day_part'] = parts_df[['morning', 'noon', 'evening', 'night']].idxmax(axis=1)
df['day_part'].sample(20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step3: Parts of Day
Step4: Sample Output
|
3,760
|
<ASSISTANT_TASK:>
Python Code:
data = pd.read_csv('data/driving_log.csv', header=None,
names=['center', 'left', 'right', 'angle', 'throttle', 'break', 'speed'])
print(data.ix[0].center)
data.sample()
def img_id(path):
return path.split('/IMG/')[1]
image_paths = data.center.apply(img_id).values.tolist()
image_paths[:5]
# y_all = data[['angle', 'throttle']].values
y_all = data.angle.values
n_samples = y_all.shape[0]
print("Training Model with {} Samples".format(n_samples))
def read_image(path):
img = cv2.imread(path, cv2.IMREAD_COLOR)
img = img[40:160, 0:320] ## Cropping top section of image, just useless noise
# img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
# img = np.expand_dims(img, axis=2)
return img[:,:,::-1]
X_all = np.ndarray((n_samples, ROWS, COLS, CHANNELS), dtype=np.uint8)
for i, path in enumerate(image_paths):
img = read_image(DIR+path)
X_all[i] = img
print(X_all.shape)
for img in X_all[:3]:
plt.imshow(img)
plt.show()
X_train, X_test, y_train, y_test = train_test_split(
X_all, y_all, test_size=0.20, random_state=23)
def fit_gen(data, batch_size):
while 1:
x = np.ndarray((batch_size, ROWS, COLS, CHANNELS), dtype=np.uint8)
y = np.zeros(batch_size)
i=0
for line in data.iterrows():
path = line[1].center.split('/IMG/')[1]
x[i] = read_image(DIR+path)
y[i] = line[1].angle
i+=1
if i == batch_size:
i=0
yield (x, y)
x = np.ndarray((batch_size, ROWS, COLS, CHANNELS), dtype=np.uint8)
y = np.zeros(batch_size)
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def get_model():
lr = 0.0001
weight_init='glorot_normal'
opt = RMSprop(lr)
loss = 'mean_squared_error'
model = Sequential()
model.add(BatchNormalization(mode=2, axis=1, input_shape=(ROWS, COLS, CHANNELS)))
model.add(Convolution2D(3, 3, 3, init=weight_init, border_mode='same', activation='relu', input_shape=(ROWS, COLS, CHANNELS)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(24, 3, 3, init=weight_init, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(36, 3, 3, init=weight_init, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(48, 3, 3, init=weight_init, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(80, activation='relu', init=weight_init))
model.add(Dense(20, activation='relu', init=weight_init))
model.add(Dropout(0.25))
model.add(Dense(1, init=weight_init, activation='linear'))
model.compile(optimizer=opt, loss=loss)
return model
model = get_model()
model.summary()
nb_epoch = 50
batch_size = 64
# Callbacks
early_stopping = EarlyStopping(monitor='val_loss', patience=8, verbose=1, mode='auto')
save_weights = ModelCheckpoint('new_model.h5', monitor='val_loss', save_best_only=True)
# model.fit_generator(fit_gen(data, 32),
# samples_per_epoch=data.shape[0], nb_epoch=nb_epoch,
# validation_data=(X_test, y_test), callbacks=[save_weights, early_stopping])
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
validation_data=(X_test, y_test), verbose=1, shuffle=True, callbacks=[save_weights, early_stopping])
preds = model.predict(X_test, verbose=1)
print( "Test MSE: {}".format(mean_squared_error(y_test, preds)))
print( "Test RMSE: {}".format(np.sqrt(mean_squared_error(y_test, preds))))
js = model.to_json()
with open('model.json', 'w') as outfile:
json.dump(js, outfile)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading and Preprocessing the Images with OpenCV
Step2: Building a Convnet in Keras
|
3,761
|
<ASSISTANT_TASK:>
Python Code:
'8 = %d , 8.5 = %.1f, name = %s, 3 = %04d' % (8, 8.5, 'Ravi', 3)
'8 = {}, 8.5 = {}, name = {}, 3 = {:04}'.format(8, 8.5, 'Ravi', 3)
'8 = {a}, 8.5 = {b}, name = {c}, 3 = {d:04}'.format(a = 8, c = 'Ravi', d = 3, b = 8.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There is one more way to format, without the hassle of remembering the format specifiers. You can use the format() method of the string. The same output as the example above can be obtained as follows
Step2: If the number of items to be formatted grows large, it becomes hard to remember their positions. You can instead name each entry in the format string and refer to them by name in the call to format(), as shown below. Note that the order of the items can now be changed, since they are referred to only by name.
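As an aside that goes slightly beyond the original example: on Python 3.6 and later, the same output can be produced with an f-string, which embeds the names directly in the literal.

a, b, c, d = 8, 8.5, 'Ravi', 3
f'8 = {a}, 8.5 = {b}, name = {c}, 3 = {d:04}'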
|
3,762
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.show()
#
# EDIT for user code ...
#
# %load solutions/cartopy_exercise_1
# Make sure the figure is a decent size when plotted.
fig = plt.figure(figsize=(14, 7))
# Left plot.
ax1 = fig.add_subplot(1, 2, 1, projection=ccrs.PlateCarree())
ax1.coastlines()
# Right plot.
ax2 = fig.add_subplot(1, 2, 2, projection=ccrs.Orthographic())
ax2.coastlines()
# Show both subplots on the same figure.
plt.show()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x0, y0 = -50, -30
x1, y1 = 10, 55
plt.plot([x0, x1], [y0, y1], linewidth=4)
plt.show()
proj = ccrs.EquidistantConic()
ax = plt.axes(projection=proj)
ax.coastlines()
plt.plot([x0, x1], [y0, y1], linewidth=4)
plt.show()
ax = plt.axes(projection=proj)
ax.coastlines()
plt.plot([x0, x1], [y0, y1], linewidth=4, transform=ccrs.PlateCarree())
plt.show()
ax = plt.axes(projection=proj)
ax.coastlines()
ax.set_global()
plt.plot([x0, x1], [y0, y1], linewidth=4, transform=ccrs.PlateCarree())
plt.show()
#
# edit space for user code ...
#
# SAMPLE SOLUTION
# %load solutions/cartopy_exercise_2
#
# edit space for user code ...
#
# SAMPLE SOLUTION
# %load solutions/cartopy_exercise_3
import cartopy.feature as cfeat
fig = plt.figure(figsize=(14, 7))
ax = plt.axes(projection=ccrs.Miller())
ax.coastlines('50m')
# ax.add_feature(cfeat.BORDERS, edgecolor='b')
political_bdrys = cfeat.NaturalEarthFeature(category='cultural',
name='admin_0_countries',
scale='50m')
ax.add_feature(political_bdrys,
edgecolor='b', facecolor='none',
linestyle='--', zorder=-1)
plt.show()
ax = plt.axes(projection=ccrs.Mercator())
ax.coastlines()
gl = ax.gridlines(draw_labels=True)
plt.show()
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LATITUDE_FORMATTER
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
gl = ax.gridlines(draw_labels=True)
gl.xlocator = mticker.FixedLocator([-180, -45, 0, 45, 180])
gl.yformatter = LATITUDE_FORMATTER
plt.show()
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
x = np.linspace(337, 377, 25)
y = np.linspace(-18.7, 25.3, 35)
x2d, y2d = np.meshgrid(x, y)
data = np.cos(np.deg2rad(y2d) * 4) + np.sin(np.deg2rad(x2d) * 4)
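# (Added sketch, not part of the original notebook:) one way this sample field could be
# drawn on a map. The x values run past 360 degrees, so the data coordinate system is
# declared explicitly with the transform keyword and cartopy handles the wrapping.
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.contourf(x2d, y2d, data, transform=ccrs.PlateCarree())
plt.show()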
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cartopy's matplotlib interface is set up via the projection keyword when constructing a matplotlib Axes / SubAxes instance. The resulting axes instance has new methods, such as the coastlines() method, which are specific to drawing cartographic data
Step2: A full list of Cartopy projections is available at http
Step3: SAMPLE SOLUTION
Step4: Let's compare the Plate Carree projection to another projection from the projection list; being the Orthographic projection. We'll do that by plotting two subplots next to each other at the same time
Step5: Transforming data
Step6: Now let's try plotting the same line on an EquidistantConic projection.
Step7: The above plot is not what we intended.
Step8: Notice that the plotted line is bent
Step9:
Step10:
Step11: Adding features
Step12: We can add graticule lines and tick labels to the map using the gridlines method (this currently is limited to just a few coordinate reference systems)
Step13: We can control the specific tick values by using matplotlib's locator object, and the formatting can be controlled with matplotlib formatters
Step14: Cartopy cannot currently label all types of projection, though more work is intended on this functionality in the future.
|
3,763
|
<ASSISTANT_TASK:>
Python Code:
import requests
import json
#Every request begins with the server's URL
SERVER = 'http://data.neonscience.org/api/v0/'
#Site Code for Lower Teakettle
SITECODE = 'TEAK'
#Make request, using the sites/ endpoint
site_request = requests.get(SERVER+'sites/'+SITECODE)
#Convert to Python JSON object
site_json = site_request.json()
site_json
#Use the 'keys' method to view the component of the uppermost json dictionary
site_json.keys()
#Access the 'data' component, and use the 'keys' method to view to componenets of the json data dictionary
site_json['data'].keys()
#View the first data product dictionary
site_json['data']['dataProducts'][0]
#View product code and name for every available data product
for product in site_json['data']['dataProducts']:
print(product['dataProductCode'],product['dataProductTitle'])
#Look at Breeding Landbird Count data products
PRODUCTCODE = 'DP1.10003.001'
#Get available months of Breeding Landbird Count data products for TEAK site
#Loop through the 'dataProducts' list items (each one a dict) at the site
for product in site_json['data']['dataProducts']:
if(product['dataProductCode'] == PRODUCTCODE): #If a list item's 'dataProductCode' dict element equals the product code string,
print('Available Months: ',product['availableMonths']) #print the available months and URLs
print('URLs for each Month: ', product['availableDataUrls'])
#Make request
product_request = requests.get(SERVER+'products/'+PRODUCTCODE)
product_json = product_request.json()
#Print keys for product data dictionary
print(product_json['data'].keys())
#Print code, name, and abstract of data product
print(product_json['data']['productCode'])
print(product_json['data']['productName'])
print()
print(product_json['data']['productAbstract'])
#View keys of one site dictionary
print(product_json['data']['siteCodes'][0].keys())
#View available months and corresponding API urls, then save desired URL
for site in product_json['data']['siteCodes']:
if(site['siteCode'] == SITECODE):
for month in zip(site['availableMonths'],site['availableDataUrls']): #Loop through the list of months and URLs
print(month[0],month[1])
if(month[0] == '2018-06'): #If data is available for the desired month, save the URL
data_url = month[1]
print(data_url)
#Make Request
data_request = requests.get(SERVER+'data/'+PRODUCTCODE+'/'+SITECODE+'/'+'2018-06')
data_json = data_request.json()
#Make request with saved url
data_request = requests.get(data_url)
data_json = data_request.json()
#Print dict key for 'data' element of data JSON
print(data_json['data'].keys())
#View keys and values in first file dict
for key in data_json['data']['files'][0].keys(): #Loop through keys of the data file dict
print(key,':\t', data_json['data']['files'][0][key])
for file in data_json['data']['files']:
print(file['name'])
for file in data_json['data']['files']:
if(('_perpoint' in file['name'])|('_countdata' in file['name'])): #if file name includes '_perpoint' or '_countdata'
print(file['name'],file['url'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Site Querying
Step2: We first use the requests module to send the API request using the 'get' function; this returns a 'request' object.
Step3: The JSON object in Python is a complex collection, with nested layers of dictionaries ('dicts') and lists.
Step4: At the uppermost level the JSON object is a dictionary containing a single element with the label 'data'. This 'data' element in turn contains a dictionary with elements containing various pieces of information about the site. When we want to know what elements a dict contains, we can use the .keys() method to list the keys to each element in that dict.
Step5: This output shows that the entire API response is contained within a single dict called 'data'. In order to access any of the information contained within this highest-level 'data' dict, we will need to reference that dict directly. Let's view the different keys that are available within 'data'
Step6: The returned JSON keys include information on site location, site type, site name and code, and the availability of different data products for the site. This last piece of information is located in the element with the 'dataProducts' key.
Step7: Lists are a type of sequential data, so we can use Python's for loop to directly go through every element one by one, in this case to print out the data product code and data product name.
Step8: Typically, we use site queries to determine for which months a particular data product is available at a particular site. Let's look for the availability of Breeding Landbird Counts (DP1.10003.001)
Step9: For each data product, there will be a list of the months for which data of that type was collected and is available at the site, and a corresponding list with the URLs that we would put into the API to request that month of data for the product.
Step10: Data Product Querying
Step11: The product JSON will again store everything first in a 'data' element. Within this container, the product data is a dictionary with information on the data product we are looking up.
Step12: This request returned a lot of different types of information. Much of this information is meant to provide explanations and context for the data product. Let's look at the abstract, which provides a relatively brief description of the data product.
Step13: For looking up the availability of the data product, we want the 'siteCodes' element. This is a list with an entry for each site at which the data product is available. Each site entry is a dict whose elements include the site code, a list of months for which data is available, and a list of the API request URLs to request data for that site for a given month.
Step14: We can look up the availability of data at a particular site, and get a URL to request data for a specific month. We know from earlier that Lower Teakettle (TEAK) has the data product we want for June 2018; we can get the URL needed to request that data with nested loops through site and month lists.
Step15: Data File Querying
Step16: Alternatively we could use one of the "Available Data URLs" from a sites/ or products/ request, like the data_url we saved earlier.
Step17: As with the sites JSON content, the uppermost level of a data request JSON object is a dictionary whose only member has the 'data' key; this member in turn is a dictionary with the product code, the sitecode, the month, and a list of the available data files.
Step18: A number of different files are available, but the actual count data are in files which have 'brd_perpoint' or 'brd_countdata' in the file name.
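As a possible next step (not part of the original walkthrough), one of those count-data CSV files could be read straight into pandas from its URL; the sketch below reuses the objects defined in the code above and is otherwise illustrative only.

import pandas as pd

count_files = [f for f in data_json['data']['files'] if '_countdata' in f['name']]
if count_files:  # assumes at least one count-data file was listed for this month
    counts_df = pd.read_csv(count_files[0]['url'])
    print(counts_df.shape)
    print(counts_df.head())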
|
3,764
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1/(1+np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
output_error_term = error
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(output_error_term, self.weights_hidden_to_output.T)
# TODO: Backpropagated error terms - Replace these values with your calculations.
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:, None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:,None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 5200
learning_rate = 0.8
hidden_nodes = 9
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: A brief look at the data
Step3: Dummy variables
Step4: Adjusting the target variables
Step5: Splitting the data into training, test, and validation sets
Step6: We split the data into two sets: one used for training and one used to validate the network after it has been trained. Because the data has a time-series character, we train on historical data and then try to predict future data (the validation set).
Step7: Build the network (the backpropagation error terms used in the train method are written out after this list)
Step8: Unit tests
Step9: Train the network
Step10: Check the predictions
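For reference, with a sigmoid hidden layer and an identity output unit, the error terms backpropagated in the train method are

$$\delta_o = y - \hat{y}, \qquad \delta_h = \left(\delta_o\, W_{h \to o}^{\mathsf{T}}\right) \odot h \odot (1 - h),$$

where $h$ is the vector of hidden activations; each weight matrix is then nudged by the learning rate times the batch-averaged outer product of its layer's input with the corresponding error term.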
|
3,765
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'sandbox-3', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
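Every entry above follows the same two-call pattern from the code: DOC.set_id(...) selects the property and DOC.set_value(...) records the answer. A minimal, purely illustrative sketch of completing one property group (section 12, CO2 provision); the values shown are placeholders, not real model metadata:
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
DOC.set_value("Y")  # placeholder pick from the listed valid choices
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
DOC.set_value("Placeholder free-text note on how CO2 forcing is provided.")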
|
3,766
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
test_data = pd.read_csv("../data/relex-sparse-multiple-choice.csv")
test_data.head()
import crowdtruth
from crowdtruth.configuration import DefaultConfig
class TestConfig(DefaultConfig):
inputColumns = ["sent_id", "term1", "b1", "e1", "term2", "b2", "e2", "sentence", "input_relations"]
outputColumns = ["output_relations"]
annotation_separator = "\n"
# processing of a closed task
open_ended_task = False
annotation_vector = [
"title", "founded_org", "place_of_birth", "children", "cause_of_death",
"top_member_employee_of_org", "employee_or_member_of", "spouse",
"alternate_names", "subsidiaries", "place_of_death", "schools_attended",
"place_of_headquarters", "charges", "origin", "places_of_residence",
"none"]
def processJudgments(self, judgments):
# pre-process output to match the values in annotation_vector
for col in self.outputColumns:
# transform to lowercase
judgments[col] = judgments[col].apply(lambda x: str(x).lower())
return judgments
data, config = crowdtruth.load(
file = "../data/relex-sparse-multiple-choice.csv",
config = TestConfig()
)
data['judgments'].head()
results = crowdtruth.run(data, config)
results["units"].head()
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(results["units"]["uqs"])
plt.xlabel("Sentence Quality Score")
plt.ylabel("Sentences")
results["units"]["unit_annotation_score"].head(10)
results["workers"].head()
plt.hist(results["workers"]["wqs"])
plt.xlabel("Worker Quality Score")
plt.ylabel("Workers")
results["annotations"]
results["units"].to_csv("../data/results/sparsemultchoice-relex-units.csv")
results["workers"].to_csv("../data/results/sparsemultchoice-relex-workers.csv")
results["annotations"].to_csv("../data/results/sparsemultchoice-relex-annotations.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Declaring a pre-processing configuration
Step2: Our test class inherits the default configuration DefaultConfig, while also declaring some additional attributes that are specific to the Relation Extraction task
Step3: Pre-processing the input data
Step4: Computing the CrowdTruth metrics
Step5: results is a dict object that contains the quality metrics for sentences, relations and crowd workers.
Step6: The uqs column in results["units"] contains the sentence quality scores, capturing the overall worker agreement over each sentence. Here we plot its histogram.
Step7: The unit_annotation_score column in results["units"] contains the sentence-relation scores, capturing the likelihood that a relation is expressed in a sentence. For each sentence, we store a dictionary mapping each relation to its sentence-relation score.
Step8: The worker metrics are stored in results["workers"]
Step9: The wqs column in results["workers"] contains the worker quality scores, capturing the overall agreement between one worker and all the other workers.
Step10: The relation metrics are stored in results["annotations"]. The aqs column contains the relation quality scores, capturing the overall worker agreement over one relation.
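A minimal sketch, assuming the results dict produced by crowdtruth.run above, of how these three sets of scores might be inspected; the column names follow the steps above (uqs, unit_annotation_score, wqs, aqs) and the sorting and printing choices are illustrative:
units = results["units"]
print(units["uqs"].sort_values(ascending=False).head())  # highest-agreement sentences
for unit_id, rel_scores in units["unit_annotation_score"].head(3).items():
    best_rel = max(rel_scores, key=rel_scores.get)  # top-scoring relation per sentence
    print(unit_id, best_rel, rel_scores[best_rel])
print(results["workers"]["wqs"].sort_values(ascending=False).head())  # highest-quality workers
print(results["annotations"]["aqs"].sort_values(ascending=False).head())  # clearest relations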
|
3,767
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import cv2
%matplotlib inline
from matplotlib import pyplot as plt
REALSQUARE = 23.500 # Size of a square
BOARDDIM = (6,8) # Dimensions of the given board
NUMIMG = 9 # Number of images to open
imagesList = list()
# Opens each image and adds to the image list
PATH = "quiz1-data/"
for fileName in os.listdir(PATH):
imagesList.append(cv2.imread(PATH + fileName, 0))
# Computes the real-world coordinates of each square corner on the board
realSquares = []
for i in range(BOARDDIM[1]):
for j in range(BOARDDIM[0]):
realSquares.append([j*REALSQUARE, i*REALSQUARE, 0.000])
# Converts to numpy array
realSquares = np.array(realSquares, dtype=np.float32)
matA = list()
for item in range(NUMIMG):
img = imagesList[item]
_, boardCorners = cv2.findChessboardCorners(img, BOARDDIM, None)
boardCorners = boardCorners.reshape((BOARDDIM[0] * BOARDDIM[1], 2))
for k in range(48):
x, y = boardCorners[k, :]
X, Y, Z = realSquares[k, :]
matA.append([x*X, x*Y, x*Z, x, -y*X, -y*Y, y*Z, -y])
matA = np.array(matA, dtype=np.float32)
U, D, V = np.linalg.svd(matA, full_matrices=True)
# The column of V corresponding to the minimal value in the diagonal of D
# In the given sample, D always contains a 0 in the 7th column
# If we pick another value, v is generated with null values
vecV = V[6,:]
v1, v2, v3, v4, v5, v6, v7, v8 = vecV
# Compute the scale factor given the vector v
gamma = np.sqrt(v1**2 + v2**2 + v3**2)
# Compute the aspect ratio (alpha)
alpha = np.sqrt(v5**2 + v6**2 + v7**2) / gamma
# First row of R matrix
r11, r12, r13 = [v5 / alpha, v6 / alpha, v7 / alpha]
# Second row of R matrix
r21, r22, r23 = v1/gamma, v2/gamma, v3/gamma
# Third row of R matrix, computed by the cross product of rows 1 and 2
r31, r32, r33 = np.cross([r11, r12, r13], [r21, r22, r23])
# Obtain the elements of the translation vector
Tx, Ty = [v8/alpha, v4]
# If this product is bigger than 0, invert the sign of R[1,:] and R[2,:]
if x*(r11*X + r12*Y + r13*Z + Tx) > 0:
r11 = -r11
r12 = -r12
r13 = -r13
r21 = -r21
r22 = -r22
r23 = -r23
Tx = -Tx
Ty = -Ty
del matA
matA = list()
vecB = list()
# Generate new matrix A and vector B
for item in range(NUMIMG):
_, boardCorners = cv2.findChessboardCorners(imagesList[item], BOARDDIM, None)
boardCorners = boardCorners.reshape((BOARDDIM[0] * BOARDDIM[1], 2))
for k in range(48):
x, y = boardCorners[k, :]
X, Y, Z = realSquares[k]
matA.append([x, (r11*X + r12*Y + r13*Z + Tx)])
vecB.append([-x*(r31*X + r32*Y + r33*Z)])
matA = np.array(matA)
vecB = np.array(vecB)
# Solve by least squares the system Ax = B
vecSol,_, _, _ = np.linalg.lstsq(matA,vecB)
# Obtain Tz and fx
Tz, fx = vecSol
# Compute fy
fy = fx / alpha
# Matrix R and vector T representation in proper numpy objects
matR = np.array([[r11, r12, r13], [r21, r22, r23], [r31, r32, r33]])
vecT = np.array([[Tx], [Ty], [Tz]])
print("Matriz R \n {}".format(matR))
print("\nVetor T\n{}".format(vecT))
print("fx = {}".format(fx))
print("fy ={}".format(fy))
print("alpha = {}".format(alpha))
print("gamma = {}".format(gamma))
img1w = cv2.imread('extrin_param.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(1)
plt.imshow(img_rgb)
img1w = cv2.imread('extrin_param1.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(2)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_1.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(1)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_2.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(2)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_3.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(3)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_4.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(4)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_5.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(5)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_6.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(6)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_7.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(7)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_8.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(8)
plt.imshow(img_rgb)
img1w = cv2.imread('corner_9.png', cv2.IMREAD_COLOR)
img_rgb = cv2.cvtColor(img1w, cv2.COLOR_BGR2RGB)
plt.figure(9)
plt.imshow(img_rgb)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Detect the corners on the real images and compute A matrix
Step2: Now we must compute the parameters of the rotation matrix R and translation vector T, given the results of the SVD (singular value decomposition) of matrix A (remember this matrix was generated, in the loop above, using the product of each square corner's real coordinates by its image-plane coordinates, plus a column with -y at the end of each row of A).
Step3: Scale factor (gamma) = sqrt(v[1]^2 + v[2]^2 + v[3]^2)
Step4: Aspect ratio = sqrt(v[5]^2 + v[6]^2 + v[7]^2) / Scale factor
Step5: Extraction of rotation matrix R and translation vector T given the elements of v vector
Step6: Determine the sign of gamma, to detect a possible sign inversion of the first two rows of the R matrix.
Step7: Prints our results
Step8: Results given by the Toolbox using Matlab
Step9: Extrinsic Camera Parameters
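A minimal cross-check sketch, assuming the variables defined in the code above (imagesList, BOARDDIM, realSquares): OpenCV's built-in calibrateCamera can be run on the same chessboard detections, and the diagonal of its camera matrix compared against the fx and fy estimated by hand:
obj_points, img_points = [], []
for img in imagesList:
    found, corners = cv2.findChessboardCorners(img, BOARDDIM, None)
    if found:
        obj_points.append(realSquares)  # same 3D board points for every view
        img_points.append(corners)      # detected 2D corners for this view
h, w = imagesList[0].shape[:2]
ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
    obj_points, img_points, (w, h), None, None)
print(camera_matrix)  # fx and fy sit on the diagonal of this matrix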
|
3,768
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import sys
print("Python Version:",sys.version.split(" ")[0])
print("TensorFlow Version:",tf.VERSION)
sess = tf.InteractiveSession()
a = tf.zeros(())
a
a.eval()
a.shape
a.shape.ndims
a.name
tf.zeros(())
b = tf.zeros((3), name="b")
b
type(b.eval())
sess.run(b)
tf.fill((2,2), 5).eval()
tf.zeros((10,3)).eval()
tf.zeros((2,3,4)).eval()
arr1 = tf.random_normal((1000,))
arr1
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(arr1.eval(), bins=15);
arr1 = tf.random_normal((1000,), mean=20.0)
plt.hist(arr1.eval(), bins=15);
arr1 = tf.random_normal((1000,), stddev=2, name="arr1")
arr2 = tf.random_normal((1000,), stddev=1, name="arr2")
plt.hist([arr1.eval(), arr2.eval()], bins=15);
plt.hist(tf.truncated_normal((1000,)).eval(), bins=15);
arr1 = tf.random_uniform((1000,))
plt.hist(arr1.eval(), bins=15);
tf.range(5).eval()
tf.range(0, 5).eval()
tf.range(0, 5, 2).eval()
a = tf.range(6)
tf.reshape(a, (3,2)).eval()
a = tf.ones((2,2))
b = tf.fill((2,2), 10.0) # Notice we used 10.0 and not 10 to ensure the data type will be float32
c = a + b
c.eval()
d = c * 2.0
d.eval()
(d + 3).eval()
i = tf.eye(3,3)
i.eval()
a = tf.range(1,9)
i = tf.reshape(a, (2,4))
i.eval()
it = tf.matrix_transpose(i)
it.eval()
a = tf.ones((2,3))
b = tf.ones((3,4))
c = tf.matmul(a, b)
print("c has the shape of:", c.shape)
c.eval()
g = [88, 94, 71, 97, 84, 82, 80, 98, 91, 93]
total = sum(g)
count = len(g)
mean = total/count
mean
from math import sqrt
σ = sqrt(sum([(x-mean)**2 for x in g]) / count)
σ
import numpy as np
np.std(g)
t = tf.constant(g, dtype=tf.float64)
mean_t, var_t = tf.nn.moments(t, axes=0)
sqrt(var_t.eval())
variance = sum([(x-mean)**2 for x in g]) / count
variance
a = [[1,0],
[3,2],
[1,4],
]
b = [[2,1,2],
[1,2,3],
]
a = tf.constant(a)
b = tf.constant(b)
c = tf.matmul(tf.matrix_transpose(a), tf.matrix_transpose(b))
c.eval()
c = tf.matmul(a,b, transpose_a=True, transpose_b=True)
c.eval()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graph Execution
Step2: Generating new Tensors
Step3: We create a tensor and assign it to a local variable named a. When we check the value of a, this is what we get.
Step4: Notice there is no value. You need to call the eval() method of the tensor to get the actual value. This method takes an optional parameter where you can pass your session. Since we are using an interactive session, we don't have to pass anything.
Step5: You should know that the eval() method returns a numpy.float32 (or whatever the type of the tensor is) if the rank of the tensor is 0, and a numpy.ndarray if the tensor has rank 1 or higher.
Step6: The rank would be the number of dimensions.
Step7: Notice the name inside the TensorFlow execution engine is not a. It is zeros
Step8: If you created another variable using the same operation, it will be named zeros_1
Step9: Now let's create a second tensor of shape (3) which is going to be a rank-1 tensor. This time we will name it b and store it in a local variable named b.
Step10: Notice the name of the variable now is b
Step11: You can also get the value of a tensor by executing the tensor using your session.
Step12: You can also fill the tensor with any value other than 0 and 1 using the fill() function.
Step13: Notice that the data type of this tensor is int32 and not float32 because you initialized the tensor with an integer 5 and not 5.0.
Step14: Note
Step15: Generating Tensors with Random Values
Step16: This function returns random values using normal distribution which is also known as Gaussian distribution or informally called a "Bell Curve". To better understand it, let's first look at a graph showing this distribution. In Mathematics, a normal distribution of mean $\mu$ and standard deviation $\sigma$ is denoted $N(\mu, \sigma)$ (More about that in "The Math Behind It").
Step17: Notice the bell shape of the curve, where you get more values around your mean and fewer values as you move away from the mean.
Step18: You can also control how concentrated your random numbers will be around the mean by controlling the standard deviation. A higher standard deviation means fewer values close to the mean and a wider distribution.
Step19: One more note on normal distribution: if you create a large tensor with millions or tens of millions of random values, some of these values will fall really far from the mean. With some machine learning algorithms this might create instability. You can avoid that by using the truncated_normal() function instead of random_normal(). This will re-sample any values that fall more than 2 standard deviations from the mean.
Step20: Uniform Distribution
Step21: Generating Tensors with Sequence Values
Step22: Which is equivalent to
Step23: Notice that the output of the range() function will never reach the limit parameter.
Step24: Reshaping Tensors
Step25: Tensor Arithmetics
Step26: Element-wise Operations
Step27: Matrix Operations
Step28: Transpose
Step29: Matrix Multiplication
Step30: The Math Behind It
Step31: Now that we know the mean, we can go back to the original equation and calculate the standard deviation.
Step32: Note that standard deviation is a built-in function in NumPy, TensorFlow and many other languages and libraries.
Step33: In TensorFlow
Step34: Variance $\sigma^2$, $s^2$ or $Var(X)$
Step35: Matrix Multiplication
Step36: Remember from before that the mathematical shape of a matrix is the opposite of the TensorFlow shape of a tensor. So instead of rewriting our arrays, we will just use transpose to make rows into columns and columns into rows.
Step37: Luckily, there is also an easier way to do that.
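For reference, the population mean, standard deviation and variance computed step by step above follow the standard definitions (note the division by count, matching np.std's default behaviour):
$$\mu = \frac{1}{N}\sum_{i=1}^{N} x_i, \qquad \sigma = \sqrt{\frac{1}{N}\sum_{i=1}^{N}(x_i - \mu)^2}, \qquad \mathrm{Var}(X) = \sigma^2$$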
|
3,769
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy
import scipy.stats
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# seed the random number generator so we all get the same results
numpy.random.seed(18)
weight = scipy.stats.lognorm(0.23, 0, 70.8)
weight.mean(), weight.std()
xs = numpy.linspace(20, 160, 100)
ys = weight.pdf(xs)
plt.plot(xs, ys, linewidth=4, color='C0')
plt.xlabel('weight (kg)')
plt.ylabel('PDF');
def make_sample(n=100):
sample = weight.rvs(n)
return sample
sample = make_sample(n=100)
sample.mean(), sample.std()
def sample_stat(sample):
return sample.mean()
def compute_sampling_distribution(n=100, iters=1000):
stats = [sample_stat(make_sample(n)) for i in range(iters)]
return numpy.array(stats)
sample_means = compute_sampling_distribution(n=100, iters=1000)
plt.hist(sample_means, color='C1', alpha=0.5)
plt.xlabel('sample mean (n=100)')
plt.ylabel('count');
sample_means.mean()
std_err = sample_means.std()
std_err
conf_int = numpy.percentile(sample_means, [5, 95])
conf_int
def plot_sampling_distribution(n, xlim=None):
Plot the sampling distribution.
n: sample size
xlim: [xmin, xmax] range for the x axis
sample_stats = compute_sampling_distribution(n, iters=1000)
se = numpy.std(sample_stats)
ci = numpy.percentile(sample_stats, [5, 95])
plt.hist(sample_stats, color='C1', alpha=0.5)
plt.xlabel('sample statistic')
plt.xlim(xlim)
text(0.03, 0.95, 'CI [%0.2f %0.2f]' % tuple(ci))
text(0.03, 0.85, 'SE %0.2f' % se)
plt.show()
def text(x, y, s):
Plot a string at a given location in axis coordinates.
x: coordinate
y: coordinate
s: string
ax = plt.gca()
plt.text(x, y, s,
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes)
plot_sampling_distribution(100)
def sample_stat(sample):
return sample.mean()
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(plot_sampling_distribution, n=slider, xlim=fixed([55, 95]));
def sample_stat(sample):
# TODO: replace the following line with another sample statistic
return sample.mean()
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(plot_sampling_distribution, n=slider, xlim=fixed([0, 100]));
class Resampler(object):
Represents a framework for computing sampling distributions.
def __init__(self, sample, xlim=None):
Stores the actual sample.
self.sample = sample
self.n = len(sample)
self.xlim = xlim
def resample(self):
Generates a new sample by choosing from the original
sample with replacement.
new_sample = numpy.random.choice(self.sample, self.n, replace=True)
return new_sample
def sample_stat(self, sample):
Computes a sample statistic using the original sample or a
simulated sample.
return sample.mean()
def compute_sampling_distribution(self, iters=1000):
Simulates many experiments and collects the resulting sample
statistics.
stats = [self.sample_stat(self.resample()) for i in range(iters)]
return numpy.array(stats)
def plot_sampling_distribution(self):
Plots the sampling distribution.
sample_stats = self.compute_sampling_distribution()
se = sample_stats.std()
ci = numpy.percentile(sample_stats, [5, 95])
plt.hist(sample_stats, color='C1', alpha=0.5)
plt.xlabel('sample statistic')
plt.xlim(self.xlim)
text(0.03, 0.95, 'CI [%0.2f %0.2f]' % tuple(ci))
text(0.03, 0.85, 'SE %0.2f' % se)
plt.show()
def interact_func(n, xlim):
sample = weight.rvs(n)
resampler = Resampler(sample, xlim=xlim)
resampler.plot_sampling_distribution()
interact_func(n=100, xlim=[50, 100])
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(interact_func, n=slider, xlim=fixed([50, 100]));
# Solution goes here
def interact_func2(n, xlim):
sample = weight.rvs(n)
resampler = StdResampler(sample, xlim=xlim)
resampler.plot_sampling_distribution()
interact_func2(n=100, xlim=[0, 100])
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(interact_func2, n=slider, xlim=fixed([0, 100]));
female_weight = scipy.stats.lognorm(0.23, 0, 70.8)
female_weight.mean(), female_weight.std()
male_weight = scipy.stats.lognorm(0.20, 0, 87.3)
male_weight.mean(), male_weight.std()
female_sample = female_weight.rvs(100)
male_sample = male_weight.rvs(100)
male_sample.mean() - female_sample.mean()
def CohenEffectSize(group1, group2):
Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / numpy.sqrt(pooled_var)
return d
CohenEffectSize(male_sample, female_sample)
class CohenResampler(Resampler):
def __init__(self, group1, group2, xlim=None):
self.group1 = group1
self.group2 = group2
self.xlim = xlim
def resample(self):
n, m = len(self.group1), len(self.group2)
group1 = numpy.random.choice(self.group1, n, replace=True)
group2 = numpy.random.choice(self.group2, m, replace=True)
return group1, group2
def sample_stat(self, groups):
group1, group2 = groups
return CohenEffectSize(group1, group2)
resampler = CohenResampler(male_sample, female_sample)
resampler.plot_sampling_distribution()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part One
Step2: Here's what that distribution looks like
Step3: make_sample draws a random sample from this distribution. The result is a NumPy array.
Step4: Here's an example with n=100. The mean and std of the sample are close to the mean and std of the population, but not exact.
Step5: We want to estimate the average weight in the population, so the "sample statistic" we'll use is the mean
Step6: One iteration of "the experiment" is to collect a sample of 100 women and compute their average weight.
Step7: The next line runs the simulation 1000 times and puts the results in sample_means.
Step8: Let's look at the distribution of the sample means. This distribution shows how much the results vary from one experiment to the next.
Step9: The mean of the sample means is close to the actual population mean, which is nice, but not actually the important part.
Step10: The standard deviation of the sample means quantifies the variability from one experiment to the next, and reflects the precision of the estimate.
Step11: We can also use the distribution of sample means to compute a "90% confidence interval", which contains 90% of the experimental results
Step14: Now we'd like to see what happens as we vary the sample size, n. The following function takes n, runs 1000 simulated experiments, and summarizes the results.
Step15: Here's a test run with n=100
Step16: Now we can use interact to run plot_sampling_distribution with different values of n. Note
Step17: Other sample statistics
Step24: STOP HERE
Step25: The following function instantiates a Resampler and runs it.
Step26: Here's a test run with n=100
Step27: Now we can use interact_func in an interaction
Step28: Exercise 2
Step29: Test your code using the cell below
Step30: When your StdResampler is working, you should be able to interact with it
Step31: STOP HERE
Step32: And here's the men's distribution
Step33: I'll simulate a sample of 100 men and 100 women
Step34: The difference in means should be about 17 kg, but will vary from one random sample to the next
Step36: Here's the function that computes Cohen's effect size again
Step37: The difference in weight between men and women is about 1 standard deviation
Step38: Now we can write a version of the Resampler that computes the sampling distribution of $d$.
Step39: Now we can instantiate a CohenResampler and plot the sampling distribution.
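The StdResampler used in the exercise cells above is left as "# Solution goes here" in the code; a minimal sketch of one possible implementation, assuming the Resampler base class defined in the code above:
class StdResampler(Resampler):
    # Same resampling machinery as Resampler; only the sample statistic
    # changes from the mean to the standard deviation.
    def sample_stat(self, sample):
        return sample.std()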
|
3,770
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-2', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
3,771
|
<ASSISTANT_TASK:>
Python Code:
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# PyDrive reference:
# https://googledrive.github.io/PyDrive/docs/build/html/index.html
# 2. Create & upload a file text file.
# Upload the file into a specific folder
uploaded = drive.CreateFile({'title': 'Sample upload.txt', "parents": [{"kind": "drive#fileLink","id": 'your_drive_id'}]})
uploaded.SetContentString('Sample upload file content')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))
# 3. Load a file by ID and print its contents.
downloaded = drive.CreateFile({'id': uploaded.get('id')})
print('Downloaded content "{}"'.format(downloaded.GetContentString()))
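# Optional follow-up (a PyDrive sketch, not part of the original flow): list a
# few files visible in the Drive root to confirm the upload; adjust the parent
# id in the query if you uploaded into a specific folder above.
file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
print([f['title'] for f in file_list[:5]])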
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
# mnist import
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# Normalize pixel values to the range 0-1
X_train = X_train.reshape(60000, 784).astype('float32') / 255.0
X_test = X_test.reshape(10000, 784).astype('float32') / 255.0
# One-hot encode the labels
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
model = Sequential()
model.add(Dense(units=64, input_dim=28*28, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
hist = model.fit(X_train, Y_train, epochs=2, batch_size=32)
loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)
print('loss_and_metrics : ' + str(loss_and_metrics))
# Save the model file to the VM's local root path
model.save('mnist_mlp_model.h5')
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# PyDrive reference:
# https://googledrive.github.io/PyDrive/docs/build/html/index.html
# 2. Create & upload a file text file.
# Upload the file into a specific folder
uploaded = drive.CreateFile({'title': 'mnist_mlp_model.h5', "parents": [{"kind": "drive#fileLink","id": 'your_drive_id'}]})
uploaded.SetContentFile('mnist_mlp_model.h5')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))
# 3. Load a file by ID and print its contents.
downloaded = drive.CreateFile({'id': uploaded.get('id')})
# Running the code above stores the h5 file saved in the previous step into the target folder in Drive.
#- REST API로 드라이브에 있는 모델파일을 BytesIO로 다운로드
from google.colab import auth
auth.authenticate_user()
from googleapiclient.discovery import build
drive_service = build('drive', 'v3')
import io
from io import BytesIO
from googleapiclient.http import MediaIoBaseDownload
request = drive_service.files().get_media(fileId='11RMJNeeZLgUtuuvmyq3LkLBTnTc4vhHh')
downloaded = io.BytesIO()
downloader = MediaIoBaseDownload(downloaded, request)
done = False
while done is False:
status, done = downloader.next_chunk()
if status:
print("Download %%%d%%." % int(status.progress() * 100))
print("Download Complete!")
downloaded.seek(0)
with open('/tmp/mnist_mlp_model.h5', 'wb') as f:
f.write(downloaded.read())
# Download the file saved above via the REST API. It arrives as a buffer, so write it out to the VM's /tmp folder.
from keras.models import load_model
loaded_model = load_model('/tmp/mnist_mlp_model.h5')
# Loading the file saved in /tmp completes the round trip.
## predict
import numpy as np
from numpy import argmax
xhat_idx = np.random.choice(X_test.shape[0], 5)
xhat = X_test[xhat_idx]
yhat = loaded_model.predict_classes(xhat)
for i in range(5):
print('True : ' + str(argmax(Y_test[xhat_idx[i]])) + ', Predict : ' + str(yhat[i]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Reading and writing Keras model files
Step2: 4. Saving the model file from the root path to the desired folder in Drive
|
3,772
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
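# Round-trip check (illustrative, not in the original): decoding the first 100
# ids should reproduce the first 100 characters of the text.
print(''.join(int_to_vocab[i] for i in chars[:100]))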
def split_data(chars, batch_size, num_steps, split_frac=0.9):
Split character data into training and validation sets, inputs and targets for each set.
Arguments
---------
chars: character array
batch_size: Size of examples in each of batch
num_steps: Number of sequence steps to keep in the input and pass to the network
split_frac: Fraction of batches to keep in the training set
Returns train_x, train_y, val_x, val_y
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
# Split into training and validation sets, keep the first split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 50)
train_x.shape
train_x[:,:50]
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
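# Usage sketch (illustrative): each call to the generator yields one sliding
# window per array; for the split above that is a pair of (10, 50) batches.
demo_x, demo_y = next(get_batch([train_x, train_y], 50))
print(demo_x.shape, demo_y.shape)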
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# One-hot encoding the input and target characters
x_one_hot = tf.one_hot(inputs, num_classes)
y_one_hot = tf.one_hot(targets, num_classes)
### Build the RNN layers
# Use a basic LSTM cell
#lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
# Add dropout to the cell
#drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
drop = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
#cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
cell = tf.nn.rnn_cell.MultiRNNCell([drop] * num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
### Run the data through the RNN layers
# This makes a list where each element is one step in the sequence
#rnn_inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(x_one_hot, num_steps, 1)]
rnn_inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(1, num_steps, x_one_hot)]
# Run each sequence step through the RNN and collect the outputs
#outputs, state = tf.contrib.rnn.static_rnn(cell, rnn_inputs, initial_state=initial_state)
outputs, state = tf.nn.rnn(cell, rnn_inputs, initial_state=initial_state)
final_state = state
# Reshape output so it's a bunch of rows, one output row for each step for each batch
#seq_output = tf.concat(outputs, axis=1)
seq_output = tf.concat(1, outputs)
output = tf.reshape(seq_output, [-1, lstm_size])
# Now connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(num_classes))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and batch
logits = tf.matmul(output, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
preds = tf.nn.softmax(logits, name='predictions')
# Reshape the targets to match the logits
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
cost = tf.reduce_mean(loss)
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
# Export the nodes
# NOTE: I'm using a namedtuple here because I think they are cool
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
keep_prob = 0.5
epochs = 20
# Save every N iterations
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/i{}_l{}_v{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
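# Illustrative check (not in the original): with top_n=2 only the two most
# probable characters can ever be sampled, so this always prints 0 or 1.
demo_preds = np.array([[0.5, 0.3, 0.1, 0.1]])
print(pick_top_n(demo_preds, 4, top_n=2))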
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/____.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step5: Making training and validation batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: Looking at the size of this array, we see that we have rows equal to the batch size. When we want to get a batch out of here, we can grab a subset of this array that contains all the rows but has a width equal to the number of steps in the sequence. The first batch looks like this
Step8: I'll write another function to grab batches out of the arrays made by split_data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
Step9: Building the model
Step10: Hyperparameters
Step11: Training
Step12: Saved checkpoints
Step13: Sampling
Step14: Here, pass in the path to a checkpoint and sample from the network.
|
3,773
|
<ASSISTANT_TASK:>
Python Code:
import os
pgconfig = {
'host': os.environ['PGHOST'],
'port': os.environ['PGPORT'],
'database': os.environ['PGDATABASE'],
'user': os.environ['PGUSER'],
'password': os.environ['PGPASSWORD'],
}
%load_ext sql
dsl = 'postgres://{user}:{password}@{host}:{port}/{database}'.format(**pgconfig)
%sql $dsl
%%sql
select * from actor order by actor_id limit 10
%config SqlMagic.autopandas = True
df = %sql select * from actor order by actor_id
df.describe()
%%sql
WITH store_inv AS (
SELECT store_id, COUNT(*) AS inventory_cnt, COUNT(DISTINCT film_id) AS film_cnt
FROM inventory GROUP BY 1
), store_staff AS (
SELECT store_id, COUNT(*) AS staff_cnt
FROM staff GROUP BY 1
), store_customer AS (
SELECT store_id, COUNT(*) AS customer_cnt, SUM(active) AS active_customer_cnt
FROM customer GROUP BY 1
)
SELECT store.store_id, addr.address, addr.district, city.city, country.country,
store_inv.inventory_cnt, store_inv.film_cnt,
store_staff.staff_cnt,
store_customer.customer_cnt, store_customer.active_customer_cnt
FROM store
LEFT JOIN address addr USING (address_id)
LEFT JOIN city USING (city_id)
LEFT JOIN country USING (country_id)
LEFT JOIN store_inv USING (store_id)
LEFT JOIN store_staff USING (store_id)
LEFT JOIN store_customer USING (store_id)
ORDER BY 1
import seaborn as sns
%matplotlib inline
df = %sql select * from rental
df.head()
for k in df.keys():
print('{:20s} : {:,} ({:,})'.format(k, df[k].count(), df[k].nunique()))
dd = df[df['return_date'].isnull()].groupby('customer_id').count()[['rental_id',]].reset_index()
dd.groupby('rental_id').count().reset_index().rename(
columns={'rental_id': 'rental_cnt', 'customer_id': 'customer_cnt'})
d = _
sns.barplot(x='rental_cnt', y='customer_cnt', data=d)
dd[dd['rental_id'] == 3]
customer_id = 75
c = %sql select * from customer where customer_id = :customer_id
c.T
customers = tuple([int(i) for i in dd[dd['rental_id'] == 3]['customer_id'].tolist()])
c = %sql select * from customer where customer_id in :customers
c.T
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the extension and connect to the database.
Step2: Run SQL and check the results
Step3: Convert the results into a pandas DataFrame.
Step4: As a slightly more complex SQL example, aggregate each store's location, staff count, and customer count.
Step5: Aggregate the results and draw a chart
Step6: The rental table records lending activity: rental_date is the checkout date and return_date the return date; inventory_id identifies the item and customer_id the customer. Using these, compute the distribution of customers holding unreturned DVDs and draw a bar chart.
Step7: Check the item counts for each attribute.
Step8: There are 16,044 rental records covering 4,580 items and 599 customers. Return dates exist for 15,861 records, so by subtraction 183 rentals are still outstanding.
Step9: Draw this distribution as a bar chart (although at this scale the numbers in the table are easy enough to read directly).
Step10: Running queries with variable substitution
Step11: Customer ID 75 is among the matches, so look up the details in the customer table. Rather than hard-coding the ID into the SQL, use ipython-sql's variable substitution.
Step12: Variables can also be lists, so there is no need to hard-code specific IDs; use IN in the SQL WHERE clause.
|
3,774
|
<ASSISTANT_TASK:>
Python Code:
import heapq
# Create a priority queue class (a max-priority queue backed by heapq)
class priority_queue:
# Initialize the instance
def __init__(self):
# Create a list to use as the queue
self._queue = []
# Create an index to use as ordering
self._index = 0
# Create a function to add a task to the queue
def add_task(self, item, priority):
# Push onto the heap; the priority is negated so that higher priorities pop first
heapq.heappush(self._queue, (-priority, self._index, item))
# Add one to the index
self._index += 1
# Create a function to get the next item from the queue
def next_task(self):
# Return the next item in the queue
return heapq.heappop(self._queue)[-1]
# Create a priority queue called task_list
task_list = priority_queue()
# Add an item to the queue
task_list.add_task('Clean Dishes', 1)
# Add an item to the queue
task_list.add_task('Wash Car', 2)
# Add an item to the queue
task_list.add_task('Walk Dog', 3)
# Retrieve items from the queue
task_list.next_task()
# Retrieve items from the queue
task_list.next_task()
# Retrieve items from the queue
task_list.next_task()
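# Extra check (illustrative, not in the original): the _index tie-breaker keeps
# insertion order for equal priorities, since the heap compares the tuples
# element-wise and the negated priorities are equal.
tie_list = priority_queue()
tie_list.add_task('First added', 1)
tie_list.add_task('Second added', 1)
assert tie_list.next_task() == 'First added'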
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create A Priority Queue Object
Step2: Add Items To Queue
Step3: Retrieve Items From Queue By Priority
|
3,775
|
<ASSISTANT_TASK:>
Python Code:
n = 100
x = np.random.normal(1, 0.5, n)
noise = np.random.normal(0, 0.25, n)
y = 0.75*x + 1 + noise
fig, ax = plt.subplots(1, 1, figsize=(6,4))
ax.scatter(x, y)
ax.set_xlim([0,2])
ax.set_ylim([0,3.1])
def fit(x,y, with_constant=True):
beta = np.cov(x,y)[0][1] / np.var(x)
if with_constant:
alpha = np.mean(y) - beta * np.mean(x)
else:
alpha = 0
r = np.cov(x,y)[0][1] / (np.std(x)*np.std(y))
mse = np.sum((y-alpha-beta*x)**2)/n
return([beta,alpha,r,mse])
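# Cross-check (an aside, not in the original): compare against numpy's
# least-squares fit. Note that fit() mixes np.cov (ddof=1) with np.var
# (ddof=0), so its slope carries a small n/(n-1) factor relative to exact OLS.
b_np, a_np = np.polyfit(x, y, 1)
print('np.polyfit slope {:.3f}, intercept {:.3f}'.format(b_np, a_np))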
beta,alpha,r,mse = fit(x,y)
print('alpha: {:.2f}, beta: {:.2f}'.format(alpha,beta))
print('r squared: {:.2f}'.format(r*r))
print('MSE: {:.2f}'.format(mse))
def fit_plot(x,y,noise,beta,alpha):
fig, ax = plt.subplots(1, 3, figsize=(18,4))
ax[0].scatter(x, y)
x_ = np.linspace(0,2,5)
ax[0].plot(x_, alpha + beta*x_, color='orange', linewidth=2)
ax[0].set_xlim([0,2])
ax[0].set_ylim([0,3.1])
mse = np.sum((y-alpha-beta*x)**2)/n
ax[0].set_title('MSE: {:.2f}'.format(mse), fontsize=14)
ax[1].hist(noise, alpha=0.5, bins=np.arange(-0.7,0.7,0.1));
ax[1].hist(y - alpha - beta*x, alpha=0.5, bins=np.arange(-0.7,0.7,0.1))
ax[1].legend(['original noise', 'residual']);
stats.probplot(y - alpha - beta*x, dist="norm", plot=ax[2]);
fit_plot(x,y,noise,beta,alpha)
n = 200
x = np.random.normal(5, 2, n)
noise = np.random.normal(0, 0.25, n)
y = 0.75*x + 1 + noise
fig, ax = plt.subplots(1, 1, figsize=(6,4))
ax.scatter(x, y)
ax.set_xlim([0,10])
ax.set_ylim([0,10])
x_train = x[-40:]
x_test = x[:-40]
y_train = y[-40:]
y_test = y[:-40]
beta,alpha,r,mse = fit(x,y)
y_pred = alpha + beta*x_test
def pred_plot(y_pred,y_test):
fig, ax = plt.subplots(1, 2, figsize=(12,4))
ax[0].scatter(y_test, y_pred)
ax[0].plot([0,10],[0,10],color='g')
ax[0].set_xlim([0,10])
mse_pred = np.sum((y_test-y_pred)**2)/n
ax[0].set_title('MSE: {:.2f}'.format(mse_pred), fontsize=14)
ax[1].hist(y_test-y_pred);
pred_plot(y_pred,y_test)
n = 100
x = np.random.normal(1, 0.5, n)
noise = np.random.normal(0.25, 0.25, n)
y = 0.75*x + noise
beta,alpha,r,mse = fit(x,y, with_constant=False)
fit_plot(x,y,noise,beta,alpha)
n = 200
x = np.random.normal(5, 2, n)
noise = np.random.normal(1, 0.25, n)
y = 0.75*x + noise
x_train = x[-40:]
x_test = x[:-40]
y_train = y[-40:]
y_test = y[:-40]
beta,alpha,r,mse = fit(x,y, with_constant=False)
y_pred = beta*x_test
pred_plot(y_pred,y_test)
n = 100
x = np.random.normal(1, 0.5, n)
noise = np.random.normal(0.25, 0.25, n)
y = 0.75*x + noise
beta,alpha,r,mse = fit(x,y, with_constant=True)
fit_plot(x,y,noise,beta,alpha)
n = 200
x = np.random.normal(5, 2, n)
noise = np.random.normal(1, 0.25, n)
y = 0.75*x + noise
x_train = x[-40:]
x_test = x[:-40]
y_train = y[-40:]
y_test = y[:-40]
beta,alpha,r,mse = fit(x,y, with_constant=True)
y_pred = alpha+beta*x_test
pred_plot(y_pred, y_test)
n = 100
x = np.linspace(0, 2, n)
sigma2 = 0.1*x**2
noise = np.random.normal(0, np.sqrt(sigma2), n)
y = 0.75*x + 1 + noise
beta,alpha,r,mse = fit(x,y)
fit_plot(x,y,noise,beta,alpha)
n = 100
x = np.linspace(0, 2, n)
sigma2 = 0.1*((6*x).astype(int) % 4)
noise = np.random.normal(0, np.sqrt(sigma2), n)
y = 0.75*x + 1 + noise
beta,alpha,r,mse = fit(x,y)
fit_plot(x,y,noise,beta,alpha)
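# Sketch of the weighted least-squares slope mentioned in the text (an
# illustration, not part of the original analysis): weight each point by the
# inverse of its noise variance, clipped to avoid dividing by zero.
w = 1.0 / np.clip(sigma2, 1e-3, None)
xw = np.average(x, weights=w)
yw = np.average(y, weights=w)
beta_wls = np.sum(w * (x - xw) * (y - yw)) / np.sum(w * (x - xw) ** 2)
print('WLS slope estimate: {:.3f}'.format(beta_wls))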
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple linear regression considers the model function $y = \alpha + \beta x + \varepsilon$
Step2: Expected value
Step3: We can fit a linear regression model on the training data
Step4: And use it to predict $y$ on test data
Step5: What could possibly go wrong? Assumptions for OLS
Step6: What would it mean for a prediction model?
Step7: But if we have a constant in our model, it is unbiased
Step8: <span style='color
Step9: When this assumption is violated, we will turn to a weighted least squares estimator.
|
3,776
|
<ASSISTANT_TASK:>
Python Code:
%cd -q ~/neurokernel/examples/olfaction/data
%run gen_olf_input.py
%run create_olf_gexf.py
import re
import networkx as nx
import neurokernel.tools.graph
g = nx.read_gexf('antennallobe.gexf.gz')
df_node, df_edge = neurokernel.tools.graph.graph_to_df(g)
glom_name = 'DA1'
osn_ind = sorted(list(set([ind[0] for ind in \
df_edge[df_edge.name.str.contains('.*-%s_.*' % glom_name)].index])))
pn_ind = sorted(list(set([ind[1] for ind in \
df_edge[df_edge.name.str.contains('.*-%s_.*' % glom_name)].index])))
# Get OSN and PN label indices:
osn_ind_labels = [int(re.search('osn_.*_(\d+)', name).group(1)) \
for name in df_node.ix[osn_ind].name]
pn_ind_labels = [int(re.search('.*_pn_(\d+)', name).group(1)) \
for name in df_node.ix[pn_ind].name]
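# Quick check (counts taken from this demo's description): glomerulus DA1
# should yield 25 OSNs and 3 PNs.
print(len(osn_ind), 'OSNs and', len(pn_ind), 'PNs for', glom_name)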
%cd -q ~/neurokernel/examples/olfaction
%run olfaction_demo.py
%matplotlib inline
import h5py
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
fmt = lambda x, pos: '%2.2f' % (float(x)/1e4)
with h5py.File('./data/olfactory_input.h5', 'r') as fi, \
h5py.File('olfactory_output_spike.h5','r') as fo:
data_i = fi['array'].value
data_o = fo['array'].value
mpl.rcParams['figure.dpi'] = 120
mpl.rcParams['figure.figsize'] = (12,9)
raster = lambda data: plt.eventplot([np.nonzero(data[i, :])[0] for i in xrange(data.shape[0])],
colors = [(0, 0, 0)],
lineoffsets = np.arange(data.shape[0]),
linelengths = np.ones(data.shape[0])/2.0)
f = plt.figure()
plt.subplot(311)
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.FuncFormatter(fmt))
plt.plot(data_i[:10000, 0]);
ax.set_ylim(np.min(data_i)-1, np.max(data_i)+1)
ax.set_xlim(0, 10000)
plt.title('Input Stimulus'); plt.ylabel('Concentration')
plt.subplot(312)
raster(data_o.T[osn_ind, :])
plt.title('Spikes Generated by OSNs'); plt.ylabel('OSN #');
ax = plt.gca()
ax.set_ylim(np.min(osn_ind_labels), np.max(osn_ind_labels))
ax.xaxis.set_major_formatter(ticker.FuncFormatter(fmt))
ax.yaxis.set_major_locator(ticker.MultipleLocator(base=5.0))
plt.subplot(313)
raster(data_o.T[pn_ind, :])
plt.title('Spikes Generated by PNs'); plt.ylabel('PN #');
ax = plt.gca()
ax.set_ylim(np.min(pn_ind_labels)-0.5, np.max(pn_ind_labels)+0.5)
ax.xaxis.set_major_formatter(ticker.FuncFormatter(fmt))
ax.yaxis.set_major_locator(ticker.MultipleLocator(base=1.0))
plt.xlabel('time (s)')
plt.subplots_adjust()
f.savefig('olfactory_output.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we identify the indices of the olfactory sensory neurons (OSNs) and projection neurons (PNs) associated with a specific glomerulus; in this case, we focus on glomerulus DA1
Step2: We now execute the model
Step3: Next, we display the input odorant concentration profile and the spikes produced by the 25 OSNs and 3 PNs associated with glomerulus DA1 in the model
|
3,777
|
<ASSISTANT_TASK:>
Python Code:
NAME = "dchud"
COLLABORATORS = ""
!wget --quiet -O boating.csv "https://data.ct.gov/api/views/mrb6-7ee5/rows.csv?accessType=DOWNLOAD"
!head boating.csv | csvlook
!wc -l boating.csv
!csvstat boating.csv
!mv boating.csv boating-orig.csv
!echo "year,tx_type,num" > boating.csv
!tail -n +2 boating-orig.csv >> boating.csv
!head boating.csv | csvlook
!csvsql --db sqlite:///boating.db --insert boating.csv
%load_ext sql
%sql sqlite:///boating.db
%%sql
SELECT COUNT(*)
FROM boating;
%%sql
SELECT *
FROM boating
LIMIT 10;
%%sql
SELECT *
FROM boating
WHERE tx_type = "BOAT CHANGE OF TYPE";
%%sql
SELECT COUNT(*)
FROM boating
WHERE tx_type = "BOAT CHANGE OF TYPE";
%%sql
SELECT *
FROM boating
WHERE tx_type = "BOAT CHANGE OF TYPE"
ORDER BY num DESC;
%%sql
SELECT year
FROM boating;
df = _.DataFrame()
for y in [2008, 2009, 2010, 2011, 2012, 2013, 2014]:
assert y in df.year.values
assert len(df) == 7
-- YOUR CODE HERE
df = _.DataFrame()
assert len(df) == 21
for tx_type in ["BOAT, DUPLICATE REGISTRATION", "REN BOAT PONTOON", "BOAT REG, CANOE"]:
assert tx_type in df.tx_type.values
-- YOUR CODE HERE
df = _.DataFrame()
df.rename(columns=lambda x: "year" if x == "year" else "count", inplace=True)
assert (df.loc[df['year'] == 2009]['count'] == 21).all()
assert (df.loc[df['year'] == 2013]['count'] == 20).all()
-- YOUR CODE HERE
df = _.DataFrame()
df.rename(columns=lambda x: "tx_type" if x == "tx_type" else "sum", inplace=True)
assert (df.loc[df['tx_type'] == "BOAT REG, CANOE"]['sum'] == 1129).all()
assert (df.loc[df['tx_type'] == "REN BOAT REGISTRATION"]['sum'] == 640790).all()
-- YOUR CODE HERE
df = _.DataFrame()
df.rename(columns=lambda x: "tx_type" if x == "tx_type" else "sum_num", inplace=True)
assert len(df) == 5
assert (df.loc[df['tx_type'] == "REN BOAT PONTOON"]['sum_num'] == 15556).all()
assert (df.loc[df['tx_type'] == "REN BOAT REGISTRATION"]['sum_num'] == 640790).all()
assert df['sum_num'].max() == 640790
assert df['sum_num'].min() == 2390
-- YOUR CODE HERE
df = _.DataFrame()
assert len(df) == 5
assert (df.loc[df['tx_type'] == "REN BOAT PONTOON"]['num'] == 2118).all()
assert (df.loc[df['tx_type'] == "BOAT REG, NEW"]['num'] == 12569).all()
assert df['num'].max() == 94005
assert df['num'].min() == 555
-- YOUR CODE HERE
df = _.DataFrame()
assert df.values[0][0] in [7, 9]
-- YOUR CODE HERE
df = _.DataFrame()
assert df.values[0][0] == 14
-- YOUR CODE HERE
df = _.DataFrame()
assert df.values[0][0] in [12071, 14466]
-- YOUR CODE HERE
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 03 - Due Friday, September 30 at 12pm
Step2: Verify that it's what we think it is on the commandline
Step3: Looks right. How many records are there?
Step4: So that should be 145, counting the header. And the basic stats
Step5: Looks about right!
Step6: Okay, using output redirection and tail we can write a new header line.
Step7: Much easier to work with now.
Step8: To work with it, we'll need the ipython-sql extension loaded, and then we'll need to connect to the db.
Step9: Note if you see a pink box above with six lines of "ShimWarning" and "UserWarning", don't panic. This is just a warning message from the ipython-sql package. You are good to go.
Step10: Looks like the same number of rows! We're good to go.
Step11: This should look familiar!
Step12: How many is that, and which year had the most?
Step13: Which year had the most of these transactions?
Step14: ...alright, your turn.
Step15: Question 2
Step16: Question 3
Step17: Question 4
Step18: Question 5
Step19: Question 6
Step20: Question 7
Step21: Question 8
Step22: Question 9
Step23: Question 10
|
3,778
|
<ASSISTANT_TASK:>
Python Code:
import pprint
document = "Human machine interface for lab abc computer applications"
text_corpus = [
"Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey",
]
# Create a set of frequent words
stoplist = set('for a of the and to in'.split(' '))
# Lowercase each document, split it by white space and filter out stopwords
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in text_corpus]
# Count word frequencies
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
# Only keep words that appear more than once
processed_corpus = [[token for token in text if frequency[token] > 1] for text in texts]
pprint.pprint(processed_corpus)
from gensim import corpora
dictionary = corpora.Dictionary(processed_corpus)
print(dictionary)
pprint.pprint(dictionary.token2id)
new_doc = "Human computer interaction"
new_vec = dictionary.doc2bow(new_doc.lower().split())
print(new_vec)
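# Readability aid (illustrative): map the ids back to their tokens. Note that
# "interaction" is absent from the dictionary and is silently ignored.
print([(dictionary[i], count) for i, count in new_vec])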
bow_corpus = [dictionary.doc2bow(text) for text in processed_corpus]
pprint.pprint(bow_corpus)
from gensim import models
# train the model
tfidf = models.TfidfModel(bow_corpus)
# transform the "system minors" string
words = "system minors".lower().split()
print(tfidf[dictionary.doc2bow(words)])
from gensim import similarities
index = similarities.SparseMatrixSimilarity(tfidf[bow_corpus], num_features=12)
query_document = 'system engineering'.split()
query_bow = dictionary.doc2bow(query_document)
sims = index[tfidf[query_bow]]
print(list(enumerate(sims)))
for document_number, score in sorted(enumerate(sims), key=lambda x: x[1], reverse=True):
print(document_number, score)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('run_core_concepts.png')
imgplot = plt.imshow(img)
_ = plt.axis('off')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The core concepts of gensim are
Step2: Corpus
Step3: Important note
Step4: Before proceeding, we want to associate each word in the corpus with a unique integer ID.
Step5: Because our corpus is small, there are only 12 different tokens in this dictionary.
Step6: For example, suppose we wanted to vectorize the phrase "Human computer interaction".
Step7: The first entry in each tuple corresponds to the ID of the token in the dictionary.
Step8: Note that while this list lives entirely in memory, in most applications you will want a more scalable solution.
Step9: The tfidf model again returns a list of tuples, where the first entry is the token ID and the second entry is the tf-idf weighting.
Step10: and to query the similarity of our query document query_document against every document in the corpus
Step11: How to read this output?
Step12: Summary
|
3,779
|
<ASSISTANT_TASK:>
Python Code:
from notebook_preamble import D, J, V, define
J('[0 2 7 0] dup max')
from joy.library import SimpleFunctionWrapper
from joy.utils.stack import list_to_stack
@SimpleFunctionWrapper
def index_of(stack):
'''Given a sequence and a item, return the index of the item, or -1 if not found.
E.g.:
[a b c] a index_of
------------------------
0
[a b c] d index_of
------------------------
-1
'''
item, (sequence, stack) = stack
i = 0
while sequence:
term, sequence = sequence
if term == item:
break
i += 1
else:
i = -1
return i, stack
D['index_of'] = index_of
J('[0 2 7 0] 7 index_of')
J('[0 2 7 0] 23 index_of')
from joy.utils.stack import iter_stack, list_to_stack
@SimpleFunctionWrapper
def distribute(stack):
'''Starting at index+1 distribute count "blocks" to the "banks" in the sequence.
[...] count index distribute
----------------------------
[...]
'''
index, (count, (sequence, stack)) = stack
assert count >= 0
cheat = list(iter_stack(sequence))
n = len(cheat)
assert index < n
cheat[index] = 0
while count:
index += 1
index %= n
cheat[index] += 1
count -= 1
return list_to_stack(cheat), stack
D['distribute'] = distribute
J('[0 2 7 0] dup max [index_of] nullary distribute')
J('[2 4 1 2] dup max [index_of] nullary distribute')
J('[3 1 2 3] dup max [index_of] nullary distribute')
J('[0 2 3 4] dup max [index_of] nullary distribute')
J('[1 3 4 1] dup max [index_of] nullary distribute')
define('direco == dip rest cons')
define('G == [direco] cons [swap] swoncat cons')
define('make_distributor == [dup dup max [index_of] nullary distribute] G')
J('[0 2 7 0] make_distributor 6 [x] times pop')
define('count_states == [] swap x [pop index_of 0 >=] [popop size] [[swons] dip x] primrec')
define('AoC2017.6 == make_distributor count_states')
J('[0 2 7 0] AoC2017.6')
J('[1 1 1] AoC2017.6')
J('[8 0 0 0 0 0] AoC2017.6')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Starting at index+1, distribute count "blocks" to the "banks" in the sequence.
Step2: Recalling "Generator Programs"
Step3: A function to drive a generator and count how many states before a repeat.
|
3,780
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import warnings
from scipy.optimize import minimize
from scipy.integrate import quad
from scipy.interpolate import interp1d
from scipy import stats
from importlib import reload
from src import sim_cts, sim_discrete
from scipy.stats import poisson, geom
import tensorflow as tf
## web graphics
%matplotlib inline
## interactive graphics
#%matplotlib notebook
## SVG graphics
#%config InlineBackend.figure_format = 'svg'
# plotting requirements
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from src.rng import default_random_state
#This is making a Graph implicity (or at least, puting operations onto the default graph)
# Create a Constant op that produces a 1x2 matrix. The op is
# added as a node to the default graph.
#
# The value returned by the constructor represents the output
# of the Constant op.
matrix1 = tf.constant([[3., 3.]])
# Create another Constant that produces a 2x1 matrix.
matrix2 = tf.constant([[2.],[2.]])
# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
# The returned value, 'product', represents the result of the matrix
# multiplication.
product = tf.matmul(matrix1, matrix2)
# Launch the session, which will execute, per default, the default Graph when we request operations from it..
sess = tf.Session()
# To run the matmul op we call the session 'run()' method, passing 'product'
# which represents the output of the matmul op. This indicates to the call
# that we want to get the output of the matmul op back.
#
# All inputs needed by the op are run automatically by the session. They
# typically are run in parallel.
#
# The call 'run(product)' thus causes the execution of three ops in the
# graph: the two constants and matmul.
#
# The output of the op is returned in 'result' as a numpy `ndarray` object.
result = sess.run(product)
print(result)
# ==> [[ 12.]]
# Close the Session when we're done.
sess.close()
with tf.Session() as sess:
result = sess.run(product)
print(result)
# Enter an interactive TensorFlow Session.
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])
# Initialize 'x' using the run() method of its initializer op.
x.initializer.run()
# Add an op to subtract 'a' from 'x'. Run it and print the result
sub = tf.sub(x, a)
print(sub.eval())
# ==> [-2. -1.]
# Close the Session when we're done.
sess.close()
x_data = np.random.rand(10000).astype(np.float32)
y_data = x_data * 5 + 0.3 + np.random.normal(scale=2, size=x_data.size)
plt.scatter(x_data, y_data, marker='.');
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
for step in range(201):
sess.run(train)
if step % 20 == 0:
print(step, sess.run(W), sess.run(b))
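# Hypothetical sanity check (not in the original): after training, W and b
# should sit near the generating values 5 and 0.3.
print('final fit:', sess.run(W), sess.run(b))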
sess.close()
a=tf.Variable([[-3.3, -2, -1,0,1,2,3],[-3, -2, -1,0,1,2,3],[-3, -2, -1,0,1,2,3],], name="a")
b=tf.maximum(tf.sign(a), 0)
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
b.eval()
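# Aside (illustrative): tf.maximum(tf.sign(a), 0) builds a 0/1 step indicator;
# tf.nn.relu would instead keep the positive values themselves.
tf.nn.relu(a).eval()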
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: basic use
Step2: Interactive style session
Step3: placeholders, variables, scope
Step4: So that worked beautifully with TF's own built-in optimizer. But how do we use an external optimizer?
|
3,781
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (5025, 5036)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
# TODO: Implement Function
source_id_text = [[source_vocab_to_int[word] for word in sentence.split()] for sentence in source_text.split('\n')]
target_id_text = [[target_vocab_to_int[word] for word in sentence.split()] + [target_vocab_to_int['<EOS>']] for sentence in target_text.split('\n')]
return source_id_text, target_id_text
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
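# Tiny illustration (hypothetical vocabularies, not part of the project):
# every target sentence gains a trailing <EOS> id.
demo_src_vocab = {'hi': 1}
demo_tgt_vocab = {'salut': 1, '<EOS>': 2}
print(text_to_ids('hi', 'salut', demo_src_vocab, demo_tgt_vocab))  # ([[1]], [[1, 2]])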
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
# TODO: Implement Function
inputs = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None])
learn_rate = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, learn_rate, keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
# TODO: Implement Function
td_end_removed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
td_start_added = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), td_end_removed], 1)
return td_start_added
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_decoding_input(process_decoding_input)
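# Tiny illustration (hypothetical vocabulary, not part of the project): the
# last token of each target sequence is dropped and the <GO> id (here 0) is
# prepended, so [[1, 2, 3]] becomes [[0, 1, 2]].
demo_sess = tf.Session()
print(demo_sess.run(process_decoding_input(tf.constant([[1, 2, 3]], dtype=tf.int32), {'<GO>': 0}, 1)))
demo_sess.close()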
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
# TODO: Implement Function
# Encoder embedding
# source_vocab_size = len(source_letter_to_int)
# enc_embed_input = tf.contrib.layers.embed_sequence(rnn_inputs, 1000, rnn_size)
# Encoder
# enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
enc_LSTM = tf.contrib.rnn.BasicLSTMCell(rnn_size)
enc_LSTM = tf.contrib.rnn.DropoutWrapper(enc_LSTM, output_keep_prob=keep_prob)
enc_LSTM = tf.contrib.rnn.MultiRNNCell([enc_LSTM] * num_layers)
enc_RNN_out, enc_RNN_state = tf.nn.dynamic_rnn(enc_LSTM, rnn_inputs, dtype=tf.float32)
return enc_RNN_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State *
:param dec_cell: Decoder RNN Cell *
:param dec_embed_input: Decoder embedded input *
:param sequence_length: Sequence Length *
:param decoding_scope: TensorFlow Variable Scope for decoding *
:param output_fn: Function to apply the output layer *
:param keep_prob: Dropout keep probability
:return: Train Logits
# TODO: Implement Function
train_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state, name=None)
train_pred, fin_state, fin_cntxt_state = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell,\
train_decoder_fn,inputs=dec_embed_input,sequence_length=sequence_length,\
parallel_iterations=None, swap_memory=False,time_major=False, scope=decoding_scope, name=None)
train_logits = output_fn(train_pred)
return train_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state *
:param dec_cell: Decoder RNN Cell *
:param dec_embeddings: Decoder embeddings *
:param start_of_sequence_id: GO ID *
:param end_of_sequence_id: EOS Id *
:param maximum_length: The maximum allowed time steps to decode *
:param vocab_size: Size of vocabulary *
:param decoding_scope: TensorFlow Variable Scope for decoding *
:param output_fn: Function to apply the output layer *
:param keep_prob: Dropout keep probability
:return: Inference Logits
# TODO: Implement Function
infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(output_fn, encoder_state, dec_embeddings,\
start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, dtype=tf.int32, name=None)
infer_logits, fin_state, fin_cntxt_state = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell,\
infer_decoder_fn, inputs=None, sequence_length=maximum_length,\
parallel_iterations=None, swap_memory=False,time_major=False, scope=decoding_scope, name=None)
# infer_logits = output_fn(infer_pred)
return infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length *
:param rnn_size: RNN Size *
:param num_layers: Number of layers *
:param target_vocab_to_int: Dictionary to go from the target words to an id *
:param keep_prob: Dropout keep probability *
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
# Decoder RNNs
dec_LSTM = tf.contrib.rnn.BasicLSTMCell(rnn_size)
dec_LSTM = tf.contrib.rnn.DropoutWrapper(dec_LSTM, output_keep_prob=keep_prob)
dec_LSTM = tf.contrib.rnn.MultiRNNCell([dec_LSTM] * num_layers)
# Create Output Function
with tf.variable_scope("decoding") as decoding_scope:
# Output Layer
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
# Train Logits
train_logits = decoding_layer_train(encoder_state, dec_LSTM,\
dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
# Infer Logits
infer_logits = decoding_layer_infer(encoder_state, dec_LSTM,\
dec_embeddings, target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>'], sequence_length, vocab_size, decoding_scope, output_fn, keep_prob)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder **
:param target_data: Target placeholder **
:param keep_prob: Dropout keep probability placeholder **
:param batch_size: Batch Size **
:param sequence_length: Sequence Length **
:param source_vocab_size: Source vocabulary size **
:param target_vocab_size: Target vocabulary size **
:param enc_embedding_size: Encoder embedding size **
:param dec_embedding_size: Decoder embedding size **
:param rnn_size: RNN Size **
:param num_layers: Number of layers **
:param target_vocab_to_int: Dictionary to go from the target words to an id **
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
# Apply embedding to the input data for the encoder
enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
# Encode the input
encoder_state = encoding_layer(enc_embed_input, rnn_size, num_layers, keep_prob)
# Process target data
p_target_data = process_decoding_input(target_data, target_vocab_to_int, batch_size)
# Apply embedding to the target data for the decoder
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, p_target_data)
# Decode the encoded input
train_logits, infer_logits = decoding_layer(dec_embed_input, dec_embeddings, encoder_state,\
target_vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 10
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 128
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 128
decoding_embedding_size = 128
# Learning Rate
learning_rate = 0.005
# Dropout Keep Probability
keep_probability = 0.8
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
import time
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
"""DON'T MODIFY ANYTHING IN THIS CELL"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""DON'T MODIFY ANYTHING IN THIS CELL"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids
    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
# TODO: Implement Function
sentence = sentence.lower()
sequence = [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in sentence.split()]
return sequence
"""DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE"""
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
"""DON'T MODIFY ANYTHING IN THIS CELL"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoding Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Sentence to Sequence
Step48: Translate
|
3,782
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import os
from time import time
from ipyparallel import Client
os.environ['RTDATAPATH'] = '/Users/mate/g/rt/data/refdata/'
import atmosphere as atm
from atmosphere.rt import pydisort
atm.refdata.setup_directory()
def create_atmosphere_model(**kw):
    """
    Setup layered atmospheric model using a default physical/thermal structure
    determined by HASI, and composition from the GCMS, with aerosol
    scattering properties from DISR.

    The titan dictionary contains the values used to determine the opacity
    and scattering albedo of each atmospheric layer as a function of
    wavelength. The dispersion axis is constrained by the k-coefficients that
    are used, as specified by the methane kc_file.
    """
ts = time()
titan = atm.structure.set_HASI_structure(nlev=21,method='split_at_tropopause')
atm.composition.set_abundances(titan, trace_gas={'m_H2':0.001})
atm.gas_opacity.set_methane(titan,
# kc_file='/Users/mate/g/rt/data/refdata/gas_opacity/kc_CH4.VIMS.v08.fits',
kc_file='/Users/mate/data/k/coeff/CH4/kc_CH4.HST.v01.fits',
)
atm.gas_opacity.set_cia(titan)
atm.aerosol.set_opacity(titan)
DISR = atm.aerosol.fit_DISR_phases()
atm.aerosol.set_aerosol_phase_moments(titan, DISR, nmom=32)
t_setup = time()-ts
titan['haze'].update({'ssalb':0.96})
titan.update({'radius':2575.,
'rsurf':0.10,
't_setup':t_setup,
})
if 'rsurf' in kw: titan.update({'rsurf':kw['rsurf']})
if 'verbose' in kw and kw['verbose']:
pstr = 'PyDISORT Titan atmosphere structure and opacity setup: {:6.1f} sec'
print(pstr.format(titan['t_setup']))
return titan
def setup_VIMS_calc(**kw):
    """
    Specify the viewing geometry and wavelength range for the
    radiative transfer calculation.
    """
titan = create_atmosphere_model(**kw)
titan.update({'rt':{'spher':False,
'wav_range':(2.0,2.40),
'view':{'umu0':0.99,'umue': 0.90,'phi0': 10.0,'phie': 11.0},
}})
for k in ['view','wav_range']:
if k in kw: titan['rt'].update({k:kw[k]})
fi = lambda array, v: abs(array-v).argmin()
wav_indices = lambda array, wavs: tuple([fi(array, mu) for mu in wavs])
wav_mn, wav_mx = wav_indices(titan['wavelength'], titan['rt']['wav_range'])
titan['rt'].update({'wav_mn':wav_mn,
'wav_mx':wav_mx,
'nlam':wav_mx-wav_mn+1,
})
return titan
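# A quick check of the nearest-index helpers defined above (hypothetical grid values,
# not from the Titan model): fi maps a target wavelength to the closest grid index.
_fi = lambda array, v: abs(array - v).argmin()
_grid = np.array([0.5, 1.0, 1.5])
print(_fi(_grid, 0.6), _fi(_grid, 1.4))  # -> 0 2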
titan = setup_VIMS_calc(wav_range=(0.8900,0.8901), verbose=True )
pydisort.calculate_spectrum(titan)
titan['rt']
def spectest(model):
for i in range(model['rt']['wav_mn'],model['rt']['wav_mx']):
print(i, model['wavelength'][i], model['spectrum'][i])
spectest(titan)
from astropy.io import fits
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(titan['wavelength'], titan['spectrum'])
ax.set_ylim(0.18,0.20)
from ipyparallel import parallel, Client
%%px
import os
os.environ['RTDATAPATH'] = '/Users/mate/g/rt/data/refdata/'
pwd
titan = setup_VIMS_calc(wav_range=(0.8850,0.8890))
pydisort.ipcluster_spectrum_calculation(titan)
titan['rt']
VIMS_test = setup_VIMS_calc(
rsurf=0.25,
wav_range=(1.5,3.5),
view={'umu0':np.cos(73.13*(np.pi/180)),
'umue':np.cos(51.93*(np.pi/180)),
'phi0': 272.4,
'phie': 360-111.4},
verbose=True,
)
pydisort.cluster_spectrum_calculation(VIMS_test)
VIMS_test['rt']
%matplotlib inline
import matplotlib.pyplot as plt
url = 'http://w.astro.berkeley.edu/~madamkov/refdata/test/VIMS.dat'
obs = np.genfromtxt(url,names=['wav','ref','cloud'])
fig, ax = plt.subplots()
ax.plot(obs['wav'], obs['cloud'], 'b-', drawstyle='steps-mid', label='cloud')
ax.plot(obs['wav'], obs['ref'], 'g-', drawstyle='steps-mid', label='reference')
ax.plot(VIMS_test['wavelength'], VIMS_test['spectrum'], 'r-', drawstyle='steps-mid', label='PyDISORT')
ax.set_xlim(1.2,3.5) ;
ax.legend(loc='best')
ax.set_xlabel('wavelength (um)') ; ax.set_ylabel('I/F') ;
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Define methods for generating an atmospheric model of Titan and performing the RT calculation
Step4: Parallel (multi-core) execution
Step5: Compare calculations with observed VIMS spectrum
|
3,783
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import calendar
import datetime
import matplotlib.dates as mdates
from API_client.python.datahub import datahub_main
from API_client.python.lib.dataset import dataset
from API_client.python.lib.variables import variables
import matplotlib
import warnings
warnings.filterwarnings("ignore")
matplotlib.rcParams['font.family'] = 'Avenir Lt Std'
print (matplotlib.__version__)
apikey = open('APIKEY').readlines()[0].strip()
dh = datahub_main(apikey)
ds = dataset('ncep_cfsv2', dh, debug=False)
ds2 = dataset('ncep_cfsr_climatologies', dh, debug=False)
ds.variables()
ds.vars = variables(ds.variables(), {'reftimes':ds.reftimes,'timesteps':ds.timesteps},ds)
ds2.vars = variables(ds2.variables(), {},ds2)
start_date = datetime.datetime.now() - datetime.timedelta(days=9)
end_date = datetime.datetime.now() + datetime.timedelta(days=5)
reftime_start = start_date.strftime('%Y-%m-%d') + 'T00:00:00'
reftime_end = end_date.strftime('%Y-%m-%d') + 'T18:00:00'
locations = ['Tallinn','Innsbruck','Paris','Berlin','Lisbon']
for locat in locations:
ds2.vars.TMAX_2maboveground.get_values_analysis(count=1000, location=locat)
ds.vars.Temperature_height_above_ground.get_values(count=1000, location=locat, reftime=reftime_start,
reftime_end=reftime_end)
def clean_table(loc):
ddd_clim = ds2.vars.TMAX_2maboveground.values[loc][['time','TMAX_2maboveground']]
ddd_temp = ds.vars.Temperature_height_above_ground.values[loc][['reftime','time','Temperature_height_above_ground']]
dd_temp=ddd_temp.set_index('time')
return ddd_clim,dd_temp
def resample_1month_totals(loc):
reft_unique = ds.vars.Temperature_height_above_ground.values[loc]['reftime'].unique()
nf_tmp = []
for reft in reft_unique:
abc = dd_temp[dd_temp.reftime==reft].resample('M').mean()
abc['Temperature_height_above_ground'+'_'+reft.astype(str)] = \
abc['Temperature_height_above_ground'] - 272.15
del abc['Temperature_height_above_ground']
nf_tmp.append(abc)
nf2_tmp = pd.concat(nf_tmp,axis=1)
return nf2_tmp
def reindex_clim_convert_temp():
i_new = 0
ddd_clim_new_indxes = ddd_clim.copy()
new_indexes = []
converted_temp = []
for i,clim_values in enumerate(ddd_clim['TMAX_2maboveground']):
if i == 0:
i_new = 12 - nf2_tmp.index[0].month + 2
else:
i_new = i_new + 1
if i_new == 13:
i_new = 1
new_indexes.append(i_new)
converted_temp.append(ddd_clim_new_indxes['TMAX_2maboveground'][i] -273.15)
ddd_clim_new_indxes['new_index'] = new_indexes
ddd_clim_new_indxes['tmp_c'] = converted_temp
return ddd_clim_new_indxes
def make_image(loc):
fig=plt.figure(figsize=(10,8))
ax = fig.add_subplot(111)
plt.ylim(np.min(np.min(nf2_tmp))-3,np.max(np.max(nf2_tmp))+3)
plt.boxplot(nf2_tmp,medianprops=dict(color='#1B9AA0'))
dates2 = [n.strftime('%b %Y') for n in nf2_tmp.index]
if len(np.arange(1, len(dates2)+1))== len(ddd_clim_indexed.sort_values(by=['new_index'])['tmp_c'][:-3]):
clim_len = -3
else:
clim_len = -2
plt.plot(np.arange(1, len(dates2)+1),ddd_clim_indexed.sort_values(by=['new_index'])['tmp_c'][:clim_len],"*",color='#EC5840',linestyle='-')
plt.xticks(np.arange(1, len(dates2)+1), dates2, rotation='vertical')
plt.grid(color='#C3C8CE',alpha=1)
plt.ylabel('Monthly Temperature [C]')
ttl = plt.title('Monthly Temperature in ' + loc,fontsize=15,fontweight='bold')
ttl.set_position([.5, 1.05])
fig.autofmt_xdate()
#plt.savefig('Monthly_mean_temp_cfsv2_forecast_{0}.png'.format(loc),dpi=300,bbox_inches='tight')
plt.show()
for locat in locations:
ddd_clim,dd_temp = clean_table(locat)
nf2_tmp = resample_1month_totals(locat)
ddd_clim_indexed = reindex_clim_convert_temp()
make_image(locat)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The API needs a file named APIKEY containing your API key in the working folder. We initialize the datahub and dataset objects.
Step2: For now we are going to look at Tallinn, Innsbruck, Paris, Berlin and Lisbon temperatures. In order for the automatic location selection to work, add your custom location to the API_client.python.lib.predef_locations file and then add it to the list of locations here.
Step3: Here we clean the table a bit and create a time-based index.
Step4: Next, we resample the data to 1-month totals.
Step5: Reindex the climatology dataframe so the data is ordered the same way as the cfsv2 forecast.
Step6: Finally, we visualize the monthly mean temperature for each forecast in a single plot.
Step7: Below are the five location graphs. November is forecast to be quite similar to the climatological mean (red line), while December might be much warmer than usual. January is again pretty similar to the climatology. After January, all months are forecast to be colder than average, especially April and May.
|
3,784
|
<ASSISTANT_TASK:>
Python Code:
#Import libraries
import os
import glob
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.misc import imread, imresize
from random import shuffle
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
#Initial data inside simpsons.tar.gz
info = pd.read_csv('./the-simpsons-characters-dataset-MEDIUM/number_pic_char.csv')
print(info)
def get_character_directories(path):
character_directories = os.listdir(path)
if '.DS_Store' in character_directories: character_directories.remove('.DS_Store')
return character_directories
def get_images_from_directory(path):
image_files = os.listdir(path)
if '.DS_Store' in image_files: image_files.remove('.DS_Store')
return image_files
#root pats
train_root_path = "./the-simpsons-characters-dataset-MEDIUM/simpsons_dataset"
test_root_path = "./the-simpsons-characters-dataset-MEDIUM/kaggle_simpson_testset"
#train directories
character_directories = get_character_directories(train_root_path)
def test_paths():
#configure train dataset
print("Train: {}".format(character_directories[:5]))
#images from one train directory
character_path = train_root_path + "/" + character_directories[1]
train_image_names = get_images_from_directory(character_path)
print("Images of {}: {}".format(character_directories[1], train_image_names[:5]))
#configure test dataset
test_image_names = get_images_from_directory(test_root_path)
print("\nTest: {}".format(test_image_names[:5]))
test_paths()
def plot_image(image):
plt.figure(figsize=(3, 3))
plt.imshow(image)
plt.axis('off')
plt.show()
def show_train_image(character_directory, idx):
directory = os.path.join(train_root_path, character_directory)
sample_file = get_images_from_directory(directory)[idx]
path_file = os.path.join(directory, sample_file)
sample_image = imread(path_file)
print("Label:{}, Image:{}, Shape:{}".format(character_directory, idx, sample_image.shape))
plot_image(sample_image)
idx = random.randint(0, 10)
show_train_image(character_directories[idx], idx)
def show_test_image(idx):
test_image_names = get_images_from_directory(test_root_path)
sample_file, sample_name = test_image_names[idx], test_image_names[idx].split('_')[:-1]
path_file = os.path.join(test_root_path, sample_file)
sample_image = imread(path_file)
print("Label:{}, Image:{}, Shape:{}".format('_'.join(sample_name), idx, sample_image.shape))
plot_image(sample_image)
idx = random.randint(0, 10)
show_test_image(idx)
'''
Input 'different_characters' will define the number of different Simpson
characters the generator is going to return
@Return: tuple (images list, names list)
'''
#Create the batch generator
def batch_generator_training_images(batch_size, different_characters, verbose=False):
#Obtain the characters Serie with the most number of images
characters = info.sort_values(by='total', ascending=False).name
#transform to list with the following format: Homer Simpson -> homer_simpson
characters = characters.map(lambda x: x.replace(" ", "_").lower()).tolist()
path_name_list = list()
for i in range(0, different_characters):
directory = os.path.join(train_root_path, characters[i])
path_name_list += list(map( lambda x: (os.path.join(directory, x), characters[i]),
get_images_from_directory(directory)))
#shuffle all the images
shuffle(path_name_list)
#getting a number of complete chunks according to the batch size
number_complete_chunks = len(path_name_list)//batch_size
num_images_in_all_chunks = number_complete_chunks * batch_size
for b in range(0, num_images_in_all_chunks, batch_size):
if verbose:
print("init: {}, end:{}".format(b, b+batch_size))
yield (list(map( lambda x: imread(x[0]), path_name_list[b:b + batch_size])), #x == path
list(map( lambda x: x[1], path_name_list[b:b + batch_size])) ) #x == name
#testing how to show an image from generator
def testing_generator_1():
for batch in batch_generator_training_images(32, 10, True):
for img, name in zip(*batch):
print(name)
plot_image(img)
break
pass
break
testing_generator_1()
#testing the generator
def testing_generator_2():
group = {}
for batch in batch_generator_training_images(8, 10, True):
for img, name in zip(*batch):
if not img.shape in group:
group[img.shape] = []
group[img.shape].append(img)
pass
break
for key, lists in group.items():
print("shape{} count:{}".format(key, len(lists)))
testing_generator_2()
#global variable
num_classes = 10 #different number of Simpons characters
#one hot encoder
lb = preprocessing.LabelBinarizer()
def customize_onehot_encoder(num_classes):
#Obtain the characters Serie with the most number of images
characters = info.sort_values(by='total', ascending=False).name
#transform to list with the following format: Homer Simpson -> homer_simpson
names = characters[:num_classes].map(lambda x: x.replace(" ", "_").lower()).tolist()
print("Character classes: {}\n".format(names))
lb.fit(names)
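# A tiny round-trip demo of the LabelBinarizer used above (hypothetical labels,
# independent of the dataset): transform yields one-hot rows, inverse_transform
# recovers the names.
_demo = preprocessing.LabelBinarizer().fit(['bart_simpson', 'homer_simpson', 'lisa_simpson'])
_onehot = _demo.transform(['bart_simpson'])
print(_onehot, _demo.inverse_transform(_onehot))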
#global variable
file_prefix = "spp_simpson_train_"
batch_size = 1024
import pickle
def create_processed_files(batch_size=64):
customize_onehot_encoder(num_classes)
# Format of data => {shape: {'images':[], 'labels':[]} }
data = {}
'''
The batch generator returns a tuple of two lists!!! (list_images, list_names)
'''
for cnt, batch in enumerate(batch_generator_training_images(batch_size, num_classes)):
#group by shape
grouped = {}
for image, name in zip(*batch):
if not image.shape in grouped:
grouped[image.shape] = []
grouped[image.shape].append((image, name))
#training images which are grouped by shape at the same time
for shape, tuple_imagesList_namesList in grouped.items():
imagesList, namesList = zip(*tuple_imagesList_namesList)
#casting
images = np.array(imagesList)
#onehot encoding names
names = np.array(lb.transform(namesList))
if len(shape) == 3: #just in case dimension are less than 3
#print("Storing shape:{} with {}".format(shape, len(images)))
data[shape] = {'images': images, 'labels': names}
else:
print("W: Dim image < 3")
#save into disk
with open("{}{}.pkl".format(file_prefix, cnt), 'wb') as file:
pickle.dump(data, file, pickle.HIGHEST_PROTOCOL)
print("{} saved".format("{}{}.pkl".format(file_prefix, cnt)))
create_processed_files(batch_size)
from keras.models import Sequential
from keras.layers import Conv2D, Convolution2D, Activation, MaxPooling2D, Dense, Dropout
from spp.SpatialPyramidPooling import SpatialPyramidPooling
'''
Build a model with a typical network architecture
'''
def build_model(num_channels = 3, num_classes = 10, filters = 32):
model = Sequential()
#Note: leave the image size as None to allow multiple image sizes
model.add(Conv2D(filters, (3, 3), padding='same', input_shape=(None, None, num_channels)))
model.add(Activation('relu'))
model.add(Conv2D(filters, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters*2, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters*2, (3, 3)))
model.add(Activation('relu'))
model.add(SpatialPyramidPooling([1, 2, 4]))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
build_model().summary()
#TESTING
# train on images with different sizes
def model_tried_out(num_channels, num_classes, batch_size):
model = build_model(num_channels, num_classes)
#size 1
model.fit(np.random.rand(batch_size, 8, 9, num_channels),
np.random.rand(batch_size, num_classes),
epochs=1)
#size 2
model.fit(np.random.rand(batch_size, 20, 13, num_channels),
np.random.rand(batch_size, num_classes),
epochs=1)
model_tried_out(2, 2, 16)
#global variables
num_channels = 3
def train_model():
customize_onehot_encoder(num_classes)
model = build_model(num_channels, num_classes)
num_files = len(glob.glob("{}*".format(file_prefix)))
for i in range(num_files):
fname = "{}{}.pkl".format(file_prefix, i)
if os.path.exists(fname):
with open(fname, 'rb') as file:
data = pickle.load(file)
print("\nProcessing file: {}".format(fname))
for shape, data in data.items():
print("Training shape: {}".format(shape))
val_size = 0.3 if len(data['images']) > 3 else 0
#X_train, X_vale, y_train, y_vale = train_test_split(data['images'], data['labels'],
# test_size=test_size)
model.fit(data['images'], data['labels'], validation_split=val_size, epochs=1, verbose=1)
return model
model1 = train_model()
def eyeball_test_model(model):
idx = random.randint(0, 100)
test_image_names = get_images_from_directory(test_root_path)
sample_file, sample_name = test_image_names[idx], test_image_names[idx].split('_')[:-1]
path_file = os.path.join(test_root_path, sample_file)
sample_image = imread(path_file)
print("Label: {}".format('_'.join(sample_name)))
#prediction
pred = model.predict(sample_image[np.newaxis, :])
print("PREDICTION: {}".format(lb.inverse_transform(pred)[0]))
#showing the image
plot_image(sample_image)
eyeball_test_model(model1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Showing Training and Test Samples
Step2: Training Images
Step3: Storing Processed Files On Disk
Step4: Since here, the images are processed and stored in different pickle files.
Step5: Training The Model Using The Processed Files
Step6: Testing The Model
|
3,785
|
<ASSISTANT_TASK:>
Python Code:
# Import required modules
import pandas as pd
import numpy as np
data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'age': [42, 52, 36, 24, 73],
'preTestScore': [4, 24, 31, 2, 3],
'postTestScore': [25, 94, 57, 62, 70]}
df = pd.DataFrame(data, columns = ['name', 'age', 'preTestScore', 'postTestScore'])
df
# Create a new column called df.elderly where the value is yes
# if df.age is greater than or equal to 50 and no if not
df['elderly'] = np.where(df['age']>=50, 'yes', 'no')
# View the dataframe
df
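# For more than two buckets, np.select extends the np.where pattern above
# (an illustrative aside, not part of the original recipe; 'age_group' is a
# hypothetical column name):
conditions = [df['age'] < 35, df['age'] < 50]
choices = ['young', 'middle-aged']
df['age_group'] = np.select(conditions, choices, default='elderly')
df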
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a dataframe
Step2: Add a new column for elderly
|
3,786
|
<ASSISTANT_TASK:>
Python Code:
from cobra.io import load_model
model = load_model("textbook")
model.medium
medium = model.medium
medium["EX_o2_e"] = 0.0
model.medium = medium
model.medium
model.slim_optimize()
model.medium["EX_co2_e"] = 0.0
model.medium
medium = model.medium
medium["EX_co2_e"] = 0.0
model.medium = medium
model.medium # now it worked
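# The copy-modify-assign pattern above can be wrapped in a small helper
# (a convenience sketch, not part of cobrapy's API):
def set_import_bound(model, exchange_id, bound):
    medium = model.medium    # model.medium returns a copy of the current bounds
    medium[exchange_id] = bound
    model.medium = medium    # assigning the whole dict applies the change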
model = load_model("textbook")
with model:
medium = model.medium
medium["EX_o2_e"] = 0.0
model.medium = medium
print(model.slim_optimize())
print(model.slim_optimize())
model.medium
from cobra.medium import minimal_medium
max_growth = model.slim_optimize()
minimal_medium(model, max_growth)
minimal_medium(model, 0.1, minimize_components=True)
minimal_medium(model, 0.8, minimize_components=8, open_exchanges=True)
ecoli = load_model("iJO1366")
ecoli.exchanges[0:5]
ecoli.demands
ecoli.sinks
ecoli.boundary[0:10]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This will return a dictionary that contains the upper flux bounds for all active exchange fluxes (the ones having non-zero flux bounds). Right now we see that we have enabled aerobic growth. You can modify a growth medium of a model by assigning a dictionary to model.medium that maps exchange reactions to their respective upper import bounds. For now let us enforce anaerobic growth by shutting off the oxygen import.
Step2: As we can see oxygen import is now removed from the list of active exchanges and we can verify that this also leads to a lower growth rate.
Step3: There is a small trap here. model.medium cannot be assigned to directly. So the following will not work
Step4: As you can see EX_co2_e is not set to zero. This is because model.medium is just a copy of the current exchange fluxes. Assigning to it directly with model.medium[...] = ... will not change the model. You have to assign an entire dictionary with the changed import flux upper bounds
Step5: Setting the growth medium also connects to the context manager, so you can set a specific growth medium in a reversible manner.
Step6: So the medium change is only applied within the with block and reverted automatically.
Step7: So we see that growth is actually limited by glucose import.
Step8: When minimizing the number of import fluxes there may be many alternative solutions. To obtain several of those you can also pass a positive integer to minimize_components which will give you at most that many alternative solutions. Let us try that with our model and also use the open_exchanges argument which will assign a large upper bound to all import reactions in the model. The return type will be a pandas.DataFrame.
Step9: So there are 4 alternative solutions in total. One aerobic and three anaerobic ones using different carbon sources.
Step10: For demand reactions
Step11: For sink reactions
Step12: All boundary reactions (any reaction that consumes or introduces mass into the system) can be obtained with the boundary attribute
|
3,787
|
<ASSISTANT_TASK:>
Python Code:
import random
random.seed(1)
a = [7, 1, 2, 8, 1, 3]
rnd_lst = random.sample(range(0, 10000000), 1000)
import sys
def max_pairprod_1(ary):
n = len(ary)
max_prod = -sys.float_info.max
for i in range(0, n):
for j in range(i + 1, n):
prod = ary[i] * ary[j]
if prod > max_prod:
max_prod = prod
return max_prod
print(max_pairprod_1(ary=a))
print(max_pairprod_1(ary=rnd_lst))
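# The sorting-based O(n log n) alternative mentioned in the prompt's Step 2 is not
# implemented above; a minimal sketch (max_pairprod_sorted is a hypothetical name,
# and it assumes non-negative values like those drawn by random.sample above):
def max_pairprod_sorted(ary):
    s = sorted(ary)  # O(n log n)
    return s[-1] * s[-2]  # product of the two largest values

print(max_pairprod_sorted(ary=a))  # 56, same as max_pairprod_1
print(max_pairprod_sorted(ary=rnd_lst))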
import sys
def max_pairprod_2(ary):
pos_1 = -sys.float_info.max
pos_2 = -sys.float_info.max
for i in range(0, len(ary)):
if ary[i] > pos_1:
tmp = pos_1
pos_1 = ary[i]
if tmp > pos_2:
pos_2 = tmp
elif ary[i] > pos_2:
pos_2 = ary[i]
return pos_1 * pos_2
print(max_pairprod_2(ary=a))
print(max_pairprod_2(ary=rnd_lst))
%timeit -n 1000 -r 3 max_pairprod_1(ary=rnd_lst)
%timeit -n 1000 -r 3 max_pairprod_2(ary=rnd_lst)
import timeit
funcs = ['max_pairprod_1', 'max_pairprod_2']
orders_n = [10**n for n in range(1, 5)]
times_n = {f:[] for f in funcs}
for n in orders_n:
rnd_lst = random.sample(range(0, 10**6), n)
for f in funcs:
times_n[f].append(min(timeit.Timer('%s(rnd_lst)' % f,
'from __main__ import %s, rnd_lst' % f)
.repeat(repeat=3, number=5)))
%matplotlib inline
import matplotlib.pyplot as plt
def plot_timing():
labels = [('max_pairprod_1', 'max_pairprod_1'),
('max_pairprod_2', 'max_pairprod_2')]
plt.rcParams.update({'font.size': 12})
fig = plt.figure(figsize=(10, 8))
for lb in labels:
plt.plot(orders_n, times_n[lb[0]],
alpha=0.5, label=lb[1], marker='o', lw=3)
plt.xlabel('sample size n')
plt.ylabel('time per computation in milliseconds [ms]')
plt.legend(loc=2)
plt.ylim([-1, 60])
plt.grid()
plt.show()
plot_timing()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Probably the simplest implementation is to compute the product of each pair in the list while keeping track of the largest product as we traverse it.
Step2: Now, let's think about improving this solution in terms of computational efficiency. One solution would be to sort the list so that we just need to return the first (or last) 2 numbers and compute their product. This would be a substantial improvement over our previous solution, since efficient sorting algorithms have a time complexity of $O(n\;log(n))$. However, we can do better ...
Step3: Although our second implementation contains more if-else statements, which may come with some additional overhead, we now find the maximum pairwise product in linear time, $O(n)$, which is a big improvement over our previous $O(n^2)$ algorithm. Let's do a quick benchmark using the %timeit magic
Step4: Finally, to get a better sense of the order of the function, big O, let us repeat the benchmark for different array sizes
|
3,788
|
<ASSISTANT_TASK:>
Python Code:
data.head()
from IPython.display import display, HTML
display(HTML("<h1>Okay, you want not to do this on your own.. then now: How to do this (scroll down)</h1>"))
for i in range(20):
display(HTML("<br />"))
numerical_cols = [col for col in data.columns if data[col].dtype == 'int64']
numerical_cols.pop(0)
f, ax = plt.subplots(len(numerical_cols) // 2 + len(numerical_cols) % 2, 2, figsize=(20,20))
for i, col in enumerate(numerical_cols):
    axx = ax[i // 2, i % 2]
sns.distplot(data[col], ax=axx)
axx.set_title(col, fontsize=20)
f.suptitle("Distributions of Columns in Pokemon Data Set", fontsize=24)
f.savefig("figures/distributions.svg")
numerical_cols.remove("Generation")
for col in ['Type 1', 'Type 2']:
data[col].fillna("Type not set", inplace=True)
mean_power = data.groupby(['Type 1', 'Type 2']).Total.mean().unstack()
f = plt.figure(figsize=(20,10))
with sns.axes_style("white"):
sns.heatmap(
mean_power, linewidths=0.5, cmap='coolwarm'
)
plt.gcf().savefig("figures/example_heatmap.svg")
f, ax = plt.subplots(len(numerical_cols) // 2 + len(numerical_cols) % 2, 2, figsize=(20,30))
for i, col in enumerate(numerical_cols):
    axx = ax[i // 2, i % 2]
with sns.axes_style("white"):
sns.heatmap(data.groupby(['Type 1', 'Type 2'])[col].mean().unstack(),
linewidths=0.5, cmap='coolwarm', ax=axx, square=True)
axx.set_title(col, fontsize=20)
axx.set_xticklabels(axx.xaxis.get_majorticklabels(), rotation=45)
axx.set_xlabel("")
axx.set_ylabel("")
f.suptitle("Distributions of Columns in Pokemon Data Set", fontsize=20)
pkmn = pd.melt(data,
id_vars=["Name", "Type 1", "Type 2"],
value_vars = ['HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed'],
var_name="Stat")
pkmn.sample(20)
plt.figure(figsize=(12,10))
plt.ylim(0, 275)
sns.swarmplot(x="Stat", y="value", data=pkmn, hue="Type 1", split=True, size=7)
plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
plt.gcf().savefig("figures/example_swarmplot.svg")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Okay, please try to create the following images
Step2: Okay, let's go!
Step3: Okay, nice.
Step4: Nice, we need to choose Ground & Fire Pokemon for the maximum total power. <font size=16>Never choose Pokemon with the Bug & Ghost combo!</font>
Step5: Now, let's try the melt feature we all know from R's reshape2 package
Step6: And one fancy, so-called "swarmplot"
|
3,789
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install jdk8
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
import os
# Set environment variable JAVA_HOME.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
!update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
!java -version
import sys
# Set current python version
python_version = "3.7.10"
# Install Miniconda
!wget https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh
!chmod +x Miniconda3-4.5.4-Linux-x86_64.sh
!./Miniconda3-4.5.4-Linux-x86_64.sh -b -f -p /usr/local
# Update Conda
!conda install --channel defaults conda python=$python_version --yes
!conda update --channel defaults --all --yes
# Append to the sys.path
_ = (sys.path
     .append("/usr/local/lib/python3.7/site-packages"))
os.environ['PYTHONHOME']="/usr/local"
# Install latest pre-release version of Analytics Zoo
# Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies.
!pip install --pre --upgrade analytics-zoo
# Install python dependencies
!pip install ray[tune]
!pip install xgboost==1.3.3
!pip install tensorboardx==2.2
# load data
from sklearn.datasets import load_boston
boston = load_boston()
y = boston['target']
X = boston['data']
# split the data into train and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# import necesary libraries and modules
from __future__ import print_function
import os
import argparse
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
# recommended to set it to True when running Analytics Zoo in Jupyter notebook.
OrcaContext.log_output = True # (this will display terminal's stdout and stderr in the Jupyter notebook).
cluster_mode = "local"
if cluster_mode == "local":
init_orca_context(cores=6, memory="2g", init_ray_on_spark=True) # run in local mode
elif cluster_mode == "k8s":
init_orca_context(cluster_mode="k8s", num_nodes=2, cores=4, init_ray_on_spark=True) # run on K8s cluster
elif cluster_mode == "yarn":
init_orca_context(
cluster_mode="yarn-client", cores=4, num_nodes=2, memory="2g", init_ray_on_spark=True,
driver_memory="10g", driver_cores=1) # run on Hadoop YARN cluster
from zoo.orca.automl import hp
search_space = {
"n_estimators": hp.grid_search([50, 100, 200]),
"max_depth": hp.choice([2, 4, 6]),
}
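# A note on the two primitives above (per Ray Tune semantics, which Orca's hp wraps):
# hp.grid_search enumerates every listed value, while hp.choice samples one value per
# trial; the n_sampling argument passed to fit() below controls how many random
# samples are drawn for each grid point.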
from zoo.orca.automl.xgboost import AutoXGBRegressor
auto_xgb_reg = AutoXGBRegressor(cpus_per_trial=2,
name="auto_xgb_classifier",
min_child_weight=3,
random_state=2)
auto_xgb_reg.fit(data=(X_train, y_train),
validation_data=(X_test, y_test),
search_space=search_space,
n_sampling=2,
metric="rmse")
best_model = auto_xgb_reg.get_best_model()
best_config = auto_xgb_reg.get_best_config()
print(best_config)
y_pred = best_model.predict(X_test)
from sklearn.metrics import mean_squared_error
print(mean_squared_error(y_test, y_pred))
# stop orca context when program finishes
stop_orca_context()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Environment Preparation
Step2: Install Analytics Zoo
Step3: You can install the latest pre-release version using pip install --pre --upgrade analytics-zoo[ray].
Step4: Distributed AutoML for XGBoost using Orca AutoXGBoost
Step 0
Step5: Step 1
Step6: This is the only place where you need to specify local or distributed mode. View Orca Context for more details.
Step7: Step 3
Step8: Next, use the auto xgboost regressor to fit and search for the best hyper-parameter set.
Step9: Step 4
Step10: You can also get the best hyper-parameter set.
Step11: Then, you can use the best learned model as you want. Here, we demonstrate how to predict and evaluate on the test dataset.
|
3,790
|
<ASSISTANT_TASK:>
Python Code:
# import the random numbers module. More on modules in a future notebook
import random
# empty list
a = list()
# or
a = []
# define a list
a = [1,2,3,4,2,2]
print a
# list of numbers from 0 to 9
a = range(10)
a
# Python uses zero-based indexing
a[0]
# Get the last element
a[-1]
# Get the next to the last element
a[-2]
a[:] # a full slice returns a shallow copy of the whole list
# Slice the list
a[0:6] # elements with indices 0 through 5 (the stop index is exclusive)
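# Slices also accept an optional step, a[start:stop:step]
# (a brief aside showing standard slicing behaviour):
a[0:10:2] # every second element from index 0 up to, but not including, 10
a[::-1] # a reversed copy of the list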
a = [1,2,2,3,4,4,4,6,7,2,2,2]
# Get the number of occurences of the element 2
a.count(2)
# the original list
a
# remove the element at with index 2 and return that value
a.pop(2)
# a is now modified
a
# delete without return
del a[1] # delete element at index 1
# print a
a
2 not in a
5 in a
# list can contain any type of Python objects, including lists
f = [1, '2', 'a string', [1, ('3', 2)], {'a':1, 'b':2}]
# get element @ index 2
f[2]
# change it
f[2] = 3
f
# length of the list
len(f)
import random
# list comprehension
a = [int(100*random.random()) for i in xrange(150)]
print a
# the same as
a = []
for i in range(150):
a.append(int(100*random.random()))
# get the max and min of a numeric list
max(a), min(a)
# make a tuple into a list
x = (1,2,3,4,5)
list(x)
# add object to the end of the list
x = [1,2,3]
x.append(4)
print x
x.append([6,7,8])
print x
# Appends the contents of seq to list
x.extend([9,10,11,12,[13,14,15]])
print x
x.extend([1,2,3])
x
a = [1,2,3]
b = [4,5,6]
c = a+b
c
# Returns count of how many times obj occurs in list
x.count(3)
# Returns the lowest index in the list at which obj appears
x.index(10)
# Inserts object obj into list at offset index
print x[3]
x.insert(3, ['a','b','c'])
print x
# Removes and returns last object or obj from list
x.pop()
print x
print x[3]
x.pop(3)
print x
# Removes the first occurrence of obj from list
x = [1,2,2,3,4,5,2,3,4,6,3,4,5,6,2]
x.remove(2)
print x
# Reverses objects of list in place
x.reverse()
print x
# Sort x in place
x.sort()
print x
# duplicate a list
a = [1,2,3]
b = a*5
b
import random
[random.random() for _ in range(0, 10)]
x = [random.randint(0,1000) for _ in range(10)]
print x
random.choice(x)
print range(10)
print range(0,10)
print range(5,16)
print range(-6, 7, 2)
M=[[1,2,3],
[4,5,6],
[7,8,9]]
print M
# put the 2nd column of M in a list
column = []
for row in M:
column.append(row[1])
print column
# list comprehension - another way of extracting the 2nd column of M
column = [row[1] for row in M]
print column
# compute the transpose of the matrix M
[[row[i] for row in M] for i in range(3)]
# get the diagonal elements of M
diag = [M[i][i] for i in [0, 1, 2]]
print diag
# build a list with another list as elements
[[x ** 2, x ** 3] for x in range(4)]
# build a list with an if statement
[[x, x/2, x*2] for x in range(-6, 7, 2) if x > 0]
# does the same thing as above but with more code
big_list = []
for x in range(-6,7,2):
if x > 0:
big_list.append([x, x/2, x*2])
print big_list
# does the same as above but with even more code
big_list = []
for x in range(-6,7,2):
lil_list = []
if x > 0:
lil_list.append(x)
lil_list.append(x/2)
lil_list.append(x*2)
big_list.append(lil_list)
print big_list
L = ["Good", # clint
"Bad", #
"Ugly"]
print L
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define a list
Step2: Accessing elements of a list
|
3,791
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
import inspect
import datetime as dt
from opengrid.library import solarmodel as sm
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = 16,8
SI = sm.SolarInsolation('Brussel')
print(SI.location.latlng,
SI.elevation)
date = dt.datetime(year=2015, month=10, day=22, hour=12)
print(SI.solarElevation(date), #in radians
SI.airMass(date),
)
print(SI.directIntensity(date),
SI.globalIrradiance(date)
)
start = dt.datetime(year = 2015, month = 10, day = 20)
end = dt.datetime(year = 2015, month = 10, day = 21)
df = SI.df(start,end)
df.plot()
PVM1 = sm.PVModel('Brussel')
PVM2 = sm.PVModel('Brussel', tilt=15)
PVM3 = sm.PVModel('Brussel', orient=250)
df1 = PVM1.df(start,end)
df2 = PVM2.df(start,end)
df3 = PVM3.df(start,end)
plt.figure()
plt.plot_date(df.index, df['insolation'], '-', label='Insolation')
plt.plot_date(df1.index, df1['insolation'], '-', label='south oriented, 35 degrees tilt')
plt.plot_date(df2.index, df2['insolation'], '-', label='bad tilt')
plt.plot_date(df3.index, df3['insolation'], '-', label='bad orientation')
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solar Insolation object
Step2: It uses this location to calculate the position of the sun and the mass of the air the sun has to penetrate for a given datetime (in UTC!)
Step3: This airmass, together with the altitude, is then used to calculate the direct beam intensity of the sun for that given moment. 10% of that value is added to get the global irradiance, both in W/m^2
Step4: Use the method SI.df to get a dataframe with hourly global irradiance values between start and end
Step5: PV Model
|
3,792
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import cf
import netCDF4
import matplotlib.pyplot as plt
dataurl = "http://thredds.socib.es/thredds/dodsC/mooring/conductivity_and_temperature_recorder/buoy_canaldeibiza-scb_sbe37006/L1/dep0003_buoy-canaldeibiza_scb-sbe37006_L1_latest.nc"
f = cf.read(dataurl)
print f
temperature = f.select('sea_water_temperature')
temperature
print len(temperature)
temperature_values = temperature[0].array
temperature_units = temperature[0].units
print temperature_values[0:20]
print 'Temperature units: ' + temperature_units
temperature[0].coords()
time = temperature[0].coord('time')
time
time_values = temperature[0].coord('time').array
time_units = temperature[0].coord('time').units
print time_values[0:20]
print ' '
print 'Time units: ' + time_units
time2 = netCDF4.num2date(time_values, time_units)
plt.plot(time2, temperature_values)
plt.ylabel(temperature_units, fontsize=20)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data file is the same.
Step2: Read the file
Step3: We see that the file contains 4 variables
Step4: The number of variables which have a standard name corresponding to sea_water_temperature is
Step5: but in other cases (ex
Step6: We inspect the corresponding coordinates
Step7: To extract the time variable
Step8: and to get the values
Step9: A simple plot
|
3,793
|
<ASSISTANT_TASK:>
Python Code:
# Author: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1) # Take first run
# To save time and memory for this demo, we'll just use the first
# 2.5 minutes (all we need to get 30 total events) and heavily
# resample 480->60 Hz (usually you wouldn't do either of these!)
raw = raw.crop(0, 150.).load_data()
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 20., n_jobs=1, fir_design='firwin')
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = None # no baseline as high-pass is applied
reject = dict(mag=3e-12)
# Make source space
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
src = mne.setup_source_space('spm', spacing='oct6', subjects_dir=subjects_dir,
add_dist=False)
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(raw.info, trans, src, bem)
del src
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'dSPM'
clim = dict(kind='value', lims=[0, 2.5, 5])
samples_epochs = 5, 15,
method = 'empirical', 'shrunk'
colors = 'steelblue', 'red'
evokeds = list()
stcs = list()
methods_ordered = list()
for n_train in samples_epochs:
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
for id_ in [event_ids[k] for k in conditions]])
events_ = events_[np.argsort(events_[:, 0])]
epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject,
decim=8)
epochs_train.equalize_event_counts(event_ids)
assert len(epochs_train) == 2 * n_train
# We know some of these have too few samples, so suppress warning
# with verbose='error'
noise_covs = compute_covariance(
epochs_train, method=method, tmin=None, tmax=0, # baseline only
return_estimators=True, verbose='error') # returns list
# prepare contrast
evokeds = [epochs_train[k].average() for k in conditions]
del epochs_train, events_
# do contrast
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
stcs.append(list())
methods_ordered.append(list())
for cov in noise_covs:
inverse_operator = make_inverse_operator(evokeds[0].info, forward,
cov, loose=0.2, depth=0.8,
rank=274)
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds)
stc = stc_a - stc_b
methods_ordered[-1].append(cov['method'])
stcs[-1].append(stc)
del inverse_operator, evokeds, cov, noise_covs, stc, stc_a, stc_b
del raw, forward # save some memory
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 5))
for ni, (n_train, axes) in enumerate(zip(samples_epochs, (axes1, axes2))):
# compute stc based on worst and best
ax_dynamics = axes[1]
for stc, ax, method, kind, color in zip(stcs[ni],
axes[::2],
methods_ordered[ni],
['best', 'worst'],
colors):
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim,
initial_time=0.175, background='w', foreground='k')
brain.show_view('ven')
im = brain.screenshot()
brain.close()
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(method, kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set(title='{0} epochs'.format(n_train * 2),
xlabel='Time (ms)', ylabel='Source Activation (dSPM)',
xlim=(tmin * 1e3, tmax * 1e3), ylim=(-3, 3))
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.2, left=0.01, right=0.99, wspace=0.03)
mne.viz.utils.plt_show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get data
Step2: Estimate covariances
Step3: Show the resulting source estimates
|
3,794
|
<ASSISTANT_TASK:>
Python Code:
# imports needed by this notebook (pd, DataFrame and plt are used below)
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt

weather = pd.read_table("daily_weather.tsv")
usage = pd.read_table("usage_2012.tsv")
station = pd.read_table("stations.tsv")
weather.loc[weather['season_code'] == 1, 'season_desc'] = 'winter'
weather.loc[weather['season_code'] == 2, 'season_desc'] = 'spring'
weather.loc[weather['season_code'] == 3, 'season_desc'] = 'summer'
weather.loc[weather['season_code'] == 4, 'season_desc'] = 'fall'
weather['date'] = pd.to_datetime(weather['date'])
month_rental = weather.groupby(weather['date'].dt.month)['total_riders'].sum()
mean = weather.groupby('season_desc')['temp'].mean()
weather['temp'].plot()
weather['month'] = pd.DatetimeIndex(weather.date).month
weather.groupby('month')['temp', 'humidity'].mean().plot(kind='bar')
# plot daily rental volume vs. temperature, one colored series per season
season_colors = [('winter', 'blue'), ('spring', 'magenta'),
                 ('summer', 'cyan'), ('fall', 'yellow')]
for season, color in season_colors:
    subset = weather[weather['season_desc'] == season]
    plt.scatter(subset['temp'], subset['total_riders'],
                s=30, color=color, label=season)
plt.title("Daily rental volume and temperature")
plt.legend(loc=4)
plt.show()
# plot daily rental volume vs. windspeed, reusing the season/color pairs
for season, color in season_colors:
    subset = weather[weather['season_desc'] == season]
    plt.scatter(subset['windspeed'], subset['total_riders'],
                s=30, color=color, label=season)
plt.title("Daily rental volume and windspeed")
plt.legend(loc=1)
plt.show()
stations = station [['station', 'lat', 'long']]
stations
count = usage['station_start'].value_counts()
average_rental_df = DataFrame({ 'average_rental' : count / 365})
average_rental_df.index = range(185)
average_rental_df
merged_df = pd.concat([stations, average_rental_df], axis=1)
# c_cycle is assumed to be a list of colors defined earlier in the notebook
plt.scatter(merged_df['long'], merged_df['lat'], color=c_cycle, alpha=0.5,
            s=(merged_df['average_rental']*10), label='Location of stations')
plt.legend(bbox_to_anchor=(1.2, 0.2), loc='lower right', borderaxespad=0)
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.title('Rental volume and geography')
plt.show()
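# An aside on the plot above (not from the original notebook): matplotlib's scatter
# `s` parameter is marker *area* in points^2, not radius. To make the visual radius
# proportional to average rentals one would scale by the square instead, e.g.
# s=(merged_df['average_rental'] ** 2).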
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.Use a scatterplot to show how the daily rental volume varies with temperature. Use a different series (with different colors) for each season.
Step2: 3.Create another scatterplot to show how daily rental volume varies with windspeed. As above, use a different series for each season.
Step3: 4.How do the rental volumes vary with geography? Compute the average daily rentals for each station and use this as the radius for a scatterplot of each station's latitude and longitude.
|
3,795
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
num_examples, num_features = (1000, 2) # dataset size
num_classes = 2 # binary classification task
X = np.random.random((num_examples, num_features))
y = np.int_(X[:, 0] * X[:, 0] + X[:, 1] >= 1).reshape(-1, 1)
print("Features (X): {shape[0]}x{shape[1]}".format(shape=X.shape))
print(X[:10])
print("Labels (y): {shape[0]}".format(shape=y.shape))
print(y[:10])
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
def plot_data_2D(X, **kwargs):
    """Plot 2D data points in X = np.array([(x0, y0), (x1, y1), ...])."""
fig, ax = plt.subplots(figsize=(9, 9))
ax.scatter(X[:, 0], X[:, 1],
s=35, cmap=plt.cm.get_cmap('rainbow', num_classes), **kwargs)
ax.set_aspect('equal')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
return fig, ax
plot_data_2D(X, c=y)
# Split data into training and test sets
from sklearn.model_selection import train_test_split
test_size = 300
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
print("Split dataset: {} training, {} test samples".format(len(X_train), len(X_test)))
# Plot training data
plot_data_2D(X_train, c=y_train)
# Plot test data
plot_data_2D(X_test, c=y_test)
import tensorflow as tf
# Create a TensorFlow session
session = tf.Session()
# Placeholders for input features and ground truth labels
X_placeholder = tf.placeholder(tf.float32,
shape=(None, num_features),
name="X")
y_placeholder = tf.placeholder(tf.int64,
shape=(None, 1),
name="y")
def plot_activation(func, title, x_min=-10.0, x_max=10.0, num_samples=100):
    """Visualize a given activation function as a curve."""
x = np.linspace(x_min, x_max, num_samples)
fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(x, func(x), linewidth=3)
ax.set_title(title)
ax.patch.set_facecolor('white')
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_xlim(x_min - 1, x_max + 1)
ax.set_ylim(-0.1, 1.1)
def linear_layer(input_tensor, num_units):
    """Linear activation layer: output = inputs * weights + biases"""
num_inputs = input_tensor.get_shape()[1].value # inspect tensor size
weights = tf.Variable(
tf.truncated_normal([num_inputs, num_units], stddev=1.0),
name='weights')
biases = tf.Variable(
tf.zeros([num_units]),
name='biases')
return tf.add(tf.matmul(input_tensor, weights, name='multiply'), biases,
name='add')
plot_activation(lambda x: 0.5 + 0.1*x, title='Linear') # example
def sigmoid_layer(input_tensor, num_units):
    """Sigmoid activation layer: output = sigmoid(inputs * weights + biases)"""
return tf.nn.sigmoid(linear_layer(input_tensor, num_units), name='sigmoid')
plot_activation(lambda x: 1.0 / (1.0 + np.exp(-x)), title='Sigmoid') # example
def relu_layer(input_tensor, num_units):
    """ReLU activation layer: output = ReLU(inputs * weights + biases)"""
return tf.nn.relu(linear_layer(input_tensor, num_units), name='relu')
plot_activation(lambda x: (0.5 + 0.1*x).clip(min=0), title='ReLU') # example
# Let's make a network with 2 hidden layers (and an output layer)
hidden1_num_units = 10
hidden2_num_units = 3
output_num_units = 1 # binary classification needs only 1 output
with tf.name_scope('hidden1'):
hidden1 = relu_layer(X_placeholder, hidden1_num_units)
with tf.name_scope('hidden2'):
hidden2 = relu_layer(hidden1, hidden2_num_units)
with tf.name_scope('output'):
output = sigmoid_layer(hidden2, output_num_units)
def l2_loss(logits, labels):
    """L2 loss as computed by tf.nn.l2_loss: sum((logits - labels)^2) / 2 (no sqrt)"""
labels = tf.to_float(labels)
return tf.nn.l2_loss(logits - labels, name='l2_loss')
with tf.name_scope('error'):
error = l2_loss(output, y_placeholder) # predicted vs. true labels
tf.summary.scalar(error.op.name, error) # write error (loss) to log
# Other possible error metrics
def mse_loss(logits, labels):
    """Mean squared error: mean((logits - labels)^2)"""
labels = tf.to_float(labels)
return tf.reduce_mean(tf.square(logits - labels), name='mse_loss')
def cross_entropy_loss(logits, labels):
    """Mean cross-entropy loss: mean(cross_entropy(softmax(logits), labels))"""
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy')
return tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
print("Nodes in computation graph:"
+ "\n Input features: {}".format(X_placeholder)
+ "\n Hidden layer 1: {}".format(hidden1)
+ "\n Hidden layer 2: {}".format(hidden2)
+ "\n Output labels : {}".format(output)
+ "\n Ground truth : {}".format(y_placeholder)
+ "\n Error metric : {}".format(error))
import time
import os
log_basedir = "logs"
run_label = time.strftime('%Y-%m-%d_%H-%M-%S') # e.g. 2016-08-18_21-30-45
log_path = os.path.join(log_basedir, run_label)
all_summaries = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(log_path, session.graph)
print("Logging to: {}".format(log_path))
# Pick a training algorithm
def sgd_train(error, learning_rate=0.01):
    """Gradient descent optimizer for training.

    Creates an optimizer to compute and apply gradients to all trainable variables.

    Args:
        error: Error (loss) metric.
        learning_rate: Controls the size of each step the optimizer takes.

    Returns:
        training: Training operation, ready to be called with tf.Session.run().
    """
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
return optimizer.minimize(error)
with tf.name_scope('training'):
training = sgd_train(error)
# Define training parameters
num_steps = 1000 # how many iterations to train for
batch_size = 100 # how many samples in each iteration
# Initialize variables
init_op = tf.global_variables_initializer()
session.run(init_op)
# Run training operation for num_steps iterations
for step in range(num_steps):
# Randomly pick batch_size samples from training set
sample_idx = np.random.choice(len(X_train), batch_size, replace=False)
feed_dict = {
X_placeholder: X_train[sample_idx, :],
y_placeholder: y_train[sample_idx, :]
}
# Note: feed_dict uses placeholder objects as key!
# Train for one iteration, time it
start_time = time.time()
_, error_value = session.run([training, error], feed_dict=feed_dict)
duration = time.time() - start_time
# Print an overview and write summaries (logs) every 100 iterations
if step % 100 == 0 or step == (num_steps - 1):
print("Step {:4d}: training error = {:5.2f} ({:.3f} sec)"
.format(step, error_value, duration))
summary_str = session.run(all_summaries, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
# Check performance on test set
y_test_pred, test_error = session.run([output, error], feed_dict={
X_placeholder: X_test,
y_placeholder: y_test
})
# Note: The placeholder shapes must be compatible with the tensors being supplied!
y_test_pred = np.int_(np.round_(y_test_pred))
mismatches = (y_test_pred - y_test).flat != 0
print("Test error = {:.2f} ({} mismatches)"
.format(test_error, sum(np.int_(mismatches))))
_, ax = plot_data_2D(X_test, c=y_test_pred)
ax.scatter(X_test[mismatches, 0], X_test[mismatches, 1],
s=128, marker='o', facecolors='none', edgecolors='black', linewidths=1.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Visualize Data
Step3: Prepare Data for Training and Testing
Step4: Build Computation Graph
Step6: Define helper functions
Step10: Define layer creation functions
Step11: Create network structure
Step15: Choose an error metric
Step16: Setup Logging
Step18: Train your Model
Step19: Test your Model
|
3,796
|
<ASSISTANT_TASK:>
Python Code:
import numpy
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy import linalg
from scipy.integrate import simps
%matplotlib inline
def f(x):
return x * numpy.sin(3 * numpy.pi * x / 2) + (9/4) * (numpy.pi ** 2) * numpy.sin(3 * numpy.pi * x /2)
def expected_solution_calc(x):
return numpy.sin(3 * numpy.pi * x / 2)
def calc_K(h):
K = numpy.zeros((3, 3))
K[0][0] = K[2][2]= 19 / 6
K[1][0] = K[0][1] = - 16 / 3
K[2][0] = K[0][2] = 13 / 6
K[1][1] = 32 / 3
K[1][2] = K[2][1] = - 16 / 3
K *= 2 / h
return K
def calc_M(xj_1, xj, h):
M = numpy.zeros((3, 3))
M[0][0] = (7 * xj_1 + xj) / 30
M[1][0] = M[0][1] = 2 * xj_1 / 15
M[2][0] = M[0][2] = (-xj_1 - xj) / 30
M[1][1] = 8 * (xj_1 + xj) / 15
M[1][2] = M[2][1] = 2 * xj / 15
M[2][2] = (xj_1 + 7 * xj) / 30
M *= h / 2
return M
def calc_I(xj_1, xj, h):
fj_1 = f(xj_1)
fj = f(xj)
I = numpy.zeros(3)
I[0] = 1/3 * fj_1
I[1] = 2/3 * (fj_1 + fj)
I[2] = 1/3 * fj
I *= h/2
return I
def create_global_matrixes(number_of_points):
x = numpy.linspace(0, 1, number_of_points)
number_of_elements = number_of_points - 1
h = 1 / number_of_elements
global_size = 3
for _ in range(number_of_elements - 1):
global_size += 2
K_global = numpy.zeros((global_size, global_size))
M_global = numpy.zeros((global_size, global_size))
I_global = numpy.zeros(global_size)
K = calc_K(h)
K_global[0:3, 0:3] = K
shift = 2
for _ in range(number_of_elements - 1):
temp = K_global[shift, shift]
K_global[shift:shift + 3, shift:shift + 3] = K
K_global[shift, shift] += temp
shift += 2
M_global[0:3, 0:3] = calc_M(x[0], x[1], h)
shift = 2
for i in range(1, number_of_elements):
temp = M_global[shift, shift]
M_global[shift:shift + 3, shift:shift + 3] = calc_M(x[i], x[i + 1], h)
M_global[shift, shift] += temp
shift += 2
I_global[0:3] = calc_I(x[0], x[1], h)
shift = 2
for i in range(1, number_of_elements):
temp = I_global[shift]
I_global[shift:shift+3] = calc_I(x[i], x[i + 1], h)
I_global[shift] += temp
shift += 2
return x, K_global, M_global, I_global
def mapping(calc_x, xj_1, xj):
return (xj_1 + xj - 2 * calc_x) / (xj_1 - xj)
def mapping2(z, xj_1, xj):
return (1 - z) / 2 * xj_1 + (1 + z) / 2 * xj
def N0(qsi):
return 1/2 * ((qsi ** 2) - qsi)
def N1(qsi):
return 1 - qsi ** 2
def N2(qsi):
return 1/2 * ((qsi ** 2) + qsi)
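# Quick sanity check (an illustrative addition): the three quadratic shape
# functions above form a partition of unity on [-1, 1], i.e. N0 + N1 + N2 = 1.
qsi_test = numpy.linspace(-1, 1, 11)
assert numpy.allclose(N0(qsi_test) + N1(qsi_test) + N2(qsi_test), 1.0)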
def calc_solution(calc_x, x, coeffs):
interval_num = 0
xj_1 = 0
xj = 0
if (calc_x < 0) or (calc_x > 1):
return False
if calc_x == 0:
return 0
for index, item in enumerate(x):
if calc_x <= item:
interval_num = index
xj_1 = x[index - 1]
xj = x[index]
break
c = [0, 0, 0]
for i in range(3):
c[i] = coeffs[(interval_num - 1) * 2 + i]
cur_qsi = mapping(calc_x, xj_1, xj)
res = c[0] * N0(cur_qsi) + c[1] * N1(cur_qsi) + c[2] * N2(cur_qsi)
return res
def solve(number_of_points):
x, K_global, M_global, I_global = create_global_matrixes(number_of_points)
h = 1 / (number_of_points - 1)
K_global_bc = K_global[1:,1:]
M_global_bc = M_global[1:, 1:]
I_global_bc = I_global[1:]
matr = K_global_bc + M_global_bc
solution = linalg.solve(matr, I_global_bc)
coeffs = numpy.concatenate(([0], solution))
eq_solution = []
for item in x:
temp = calc_solution(item, x, coeffs)
eq_solution.append(temp)
return x, eq_solution, h
x, eq_solution, h = solve(100)
eq_solution = numpy.array(eq_solution)
expected_solution = expected_solution_calc(x)
plt.plot(x, expected_solution, 'x', label='Exact solution')
plt.plot(x, eq_solution, label='Computed solution')
plt.title('N=100')
plt.legend()
def calc_derivatives(delta, h):
der_list = [0]
l = len(delta)
for i in range(1, l - 1):
der_list.append((delta[i + 1] - delta[i - 1]) / (2 * h))
der_list.append(0)
return numpy.array(der_list)
e0_list = []
e1_list = []
N_list = []
for i in range(50, 501, 50):
x, eq_solution, h = solve(i)
eq_solution = numpy.array(eq_solution)
expected_solution = expected_solution_calc(x)
delta = eq_solution - expected_solution
d_delta = calc_derivatives(delta, h)
e0 = numpy.sqrt(simps(delta ** 2, x))
e1 = numpy.sqrt(simps(d_delta ** 2, x))
e0_list.append(e0)
e1_list.append(e1)
N_list.append(i)
rc('text', usetex=True)
plt.plot(N_list, e0_list, 'x', label=r'$||\Delta||_0$')
plt.plot(N_list, e1_list, 'x', label=r'$||\Delta||_1$')
plt.xlabel('N')
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The matrix $K_j = \frac{2}{h} \int\limits_{-1}^{1} \frac{dN_i}{d\xi} \frac{dN_j}{d\xi} \, d\xi$
Step2: Similarly for the element mass matrix
Step3: The load vector can likewise be written as a scalar product
Step4: The global matrices are assembled by combining the elemental ones. They overlap at the nodes with integer indices: the last element of the last row of one matrix is added to the first element of the first row of the next. For the load vector, the last entry of the previous vector is added to the first entry of the next.
Step5: As a result, the problem can be rewritten in the form
Step6: Let us analyze the error. Introduce the error quantity $\Delta(x) = u(x) - u_{ex}(x)$, where $u_{ex}(x)$ is the exact solution of the equation.
Step7: Plot the dependence of both error norms on the number of points
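(Restating what the code evaluates with simps: the two error norms are $\|\Delta\|_0 = \sqrt{\int_0^1 \Delta^2(x)\,dx}$ and $\|\Delta\|_1 = \sqrt{\int_0^1 \left(\frac{d\Delta}{dx}\right)^2 dx}$.)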
|
3,797
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import fit_iir_model_raw
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_fname)
proj = mne.read_proj(proj_fname)
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
# Set up pick list: Gradiometers - bad channels
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
order = 5 # define model order
picks = picks[:5]
# Estimate AR models on raw data
b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
d, times = raw[0, 10000:20000]  # look at one channel from now on
d = d.ravel() # make flat vector
innovation = signal.convolve(d, a, 'valid')
d_ = signal.lfilter(b, a, innovation) # regenerate the signal
d_ = np.r_[d_[0] * np.ones(order), d_] # dummy samples to keep signal length
plt.close('all')
plt.figure()
plt.plot(d[:100], label='signal')
plt.plot(d_[:100], label='regenerated signal')
plt.legend()
plt.figure()
plt.psd(d, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--')
plt.legend(('Signal', 'Innovation', 'Regenerated signal'))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot the different time series and PSDs
|
3,798
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
from IPython.core.display import HTML
from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
Image(url= "https://cdn-images-1.medium.com/max/1600/1*UkI9za9zTR-HL8uM15Wmzw.png")
#hyperparams
num_epochs = 100
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
num_classes = 2
echo_step = 3
batch_size = 5
num_batches = total_series_length//batch_size//truncated_backprop_length
#Step 1 - Collect data
#Now generate the training data,
#the input is basically a random binary vector. The output will be the
#“echo” of the input, shifted echo_step steps to the right.
#Notice the reshaping of the data into a matrix with batch_size rows.
#Neural networks are trained by approximating the gradient of loss function
#with respect to the neuron-weights, by looking at only a small subset of the data,
#also known as a mini-batch. The reshaping takes the whole dataset and puts it into
#a matrix, that later will be sliced up into these mini-batches.
def generateData():
#0,1, 50K samples, 50% chance each chosen
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
#shift 3 steps to the right
y = np.roll(x, echo_step)
#pad beginning 3 values with 0
y[0:echo_step] = 0
#Gives a new shape to an array without changing its data.
#The reshaping takes the whole dataset and puts it into a matrix,
#that later will be sliced up into these mini-batches.
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
data = generateData()
print(data)
#Schematic of the reshaped data-matrix, arrow curves show adjacent time-steps that ended up on different rows.
#Light-gray rectangles represent a “zero” and dark-gray ones a “one”.
Image(url= "https://cdn-images-1.medium.com/max/1600/1*aFtwuFsboLV8z5PkEzNLXA.png")
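#a minimal check of the echo property (an illustrative addition, not part of
#the original tutorial): ignoring the zero padding, every output equals the
#input echo_step positions earlier in the flat series
x_check = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
y_check = np.roll(x_check, echo_step)
y_check[0:echo_step] = 0
assert np.array_equal(y_check[echo_step:], x_check[:-echo_step])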
#TensorFlow works by first building up a computational graph, that
#specifies what operations will be done. The input and output of this graph
#is typically multidimensional arrays, also known as tensors.
#The graph, or parts of it can then be executed iteratively in a
#session, this can either be done on the CPU, GPU or even a resource
#on a remote server.
#operations and tensors
#The two basic TensorFlow data-structures that will be used in this
#example are placeholders and variables. On each run the batch data
#is fed to the placeholders, which are “starting nodes” of the
#computational graph. Also the RNN-state is supplied in a placeholder,
#which is saved from the output of the previous run.
#Step 2 - Build the Model
#datatype, shape (5, 15) 2D array or matrix, batch size shape for later
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
#and one for the RNN state, 5,4
init_state = tf.placeholder(tf.float32, [batch_size, state_size])
#The weights and biases of the network are declared as TensorFlow variables,
#which makes them persistent across runs and enables them to be updated
#incrementally for each batch.
#3 layer recurrent net, one hidden state
#randomly initialize weights
W = tf.Variable(np.random.rand(state_size+1, state_size), dtype=tf.float32)
#anchor, improves convergence, matrix of 0s
b = tf.Variable(np.zeros((1,state_size)), dtype=tf.float32)
W2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32)
b2 = tf.Variable(np.zeros((1,num_classes)), dtype=tf.float32)
Image(url= "https://cdn-images-1.medium.com/max/1600/1*n45uYnAfTDrBvG87J-poCA.jpeg")
#Now it’s time to build the part of the graph that resembles the actual RNN computation,
#first we want to split the batch data into adjacent time-steps.
# Unpack columns
#Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
#so a bunch of arrays, 1 batch per time step
# Change to unstack for new version of TF
inputs_series = tf.unstack(batchX_placeholder, axis=1)
labels_series = tf.unstack(batchY_placeholder, axis=1)
Image(url= "https://cdn-images-1.medium.com/max/1600/1*f2iL4zOkBUBGOpVE7kyajg.png")
#Schematic of the current batch split into columns, the order index is shown on each data-point
#and arrows show adjacent time-steps.
#Forward pass
#state placeholder
current_state = init_state
#series of states through time
states_series = []
#for each set of inputs
#forward pass through the network to get new state value
#store all states in memory
for current_input in inputs_series:
#format input
current_input = tf.reshape(current_input, [batch_size, 1])
#mix both state and input data
input_and_state_concatenated = tf.concat([current_input, current_state], axis=1)  # Increasing number of columns
#perform matrix multiplication between weights and input, add bias
#squash with a nonlinearity, for probability value
next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition
#store the state in memory
states_series.append(next_state)
#set current state to next one
current_state = next_state
Image(url= "https://cdn-images-1.medium.com/max/1600/1*fdwNNJ5UOE3Sx0R_Cyfmyg.png")
#calculate loss
#second part of forward pass
#logits short for logistic transform
logits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition
#apply softmax nonlinearity for output probability
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
#measure loss, calculate softmax again on logits, then compute cross entropy
#measures the difference between two probability distributions
#this will return A Tensor of the same shape as labels and of the same type as logits
#with the softmax cross entropy loss.
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series, labels_series)]
#computes average, one value
total_loss = tf.reduce_mean(losses)
#use adagrad to minimize with .3 learning rate
#minimize it with adagrad, not SGD
#One downside of SGD is that it is sensitive to
#the learning rate hyper-parameter. When the data are sparse and features have
#different frequencies, a single learning rate for every weight update can have
#exponential regret.
#Some features can be extremely useful and informative to an optimization problem but
#they may not show up in most of the training instances or data. If, when they do show up,
#they are weighted equally in terms of learning rate as a feature that has shown up hundreds
#of times we are practically saying that the influence of such features means nothing in the
#overall optimization. it's impact per step in the stochastic gradient descent will be so small
#that it can practically be discounted). To counter this, AdaGrad makes it such that features
#that are more sparse in the data have a higher learning rate which translates into a larger
#update for that feature
#sparse features can be very useful.
#Each feature has a different learning rate which is adaptable.
#gives voice to the little guy who matters a lot
#weights that receive high gradients will have their effective learning rate reduced,
#while weights that receive small or infrequent updates will have their effective learning rate increased.
#great paper http://seed.ucsd.edu/mediawiki/images/6/6a/Adagrad.pdf
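#illustrative numpy sketch of the AdaGrad rule described above (a sketch only,
#not TensorFlow's internal implementation): each weight's effective step size
#shrinks as its accumulated squared gradient grows
def adagrad_step(w, grad, cache, lr=0.3, eps=1e-8):
    cache += grad ** 2
    w -= lr * grad / (np.sqrt(cache) + eps)
    return w, cache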
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
#visualizer
def plot(loss_list, predictions_series, batchX, batchY):
plt.subplot(2, 3, 1)
plt.cla()
plt.plot(loss_list)
for batch_series_idx in range(5):
one_hot_output_series = np.array(predictions_series)[:, batch_series_idx, :]
single_output_series = np.array([(1 if out[0] < 0.5 else 0) for out in one_hot_output_series])
plt.subplot(2, 3, batch_series_idx + 2)
plt.cla()
plt.axis([0, truncated_backprop_length, 0, 2])
left_offset = range(truncated_backprop_length)
plt.bar(left_offset, batchX[batch_series_idx, :], width=1, color="blue")
plt.bar(left_offset, batchY[batch_series_idx, :] * 0.5, width=1, color="red")
plt.bar(left_offset, single_output_series * 0.3, width=1, color="green")
plt.draw()
plt.pause(0.0001)
#Step 3 Training the network
with tf.Session() as sess:
#we stupidly have to do this every time, it should just know
#that we initialized these vars. v2 guys, v2..
sess.run(tf.global_variables_initializer())
#interactive mode
plt.ion()
#initialize the figure
plt.figure()
#show the graph
plt.show()
#to show the loss decrease
loss_list = []
for epoch_idx in range(num_epochs):
#generate data at every epoch, batches run in epochs
x,y = generateData()
#initialize an empty hidden state
_current_state = np.zeros((batch_size, state_size))
print("New data, epoch", epoch_idx)
#each batch
for batch_idx in range(num_batches):
#starting and ending point per batch
#since weights reoccur at every layer through time
#These layers will not be unrolled to the beginning of time,
#that would be too computationally expensive, and are therefore truncated
#at a limited number of time-steps
start_idx = batch_idx * truncated_backprop_length
end_idx = start_idx + truncated_backprop_length
batchX = x[:,start_idx:end_idx]
batchY = y[:,start_idx:end_idx]
#run the computation graph, give it the values
#we calculated earlier
_total_loss, _train_step, _current_state, _predictions_series = sess.run(
[total_loss, train_step, current_state, predictions_series],
feed_dict={
batchX_placeholder:batchX,
batchY_placeholder:batchY,
init_state:_current_state
})
loss_list.append(_total_loss)
if batch_idx%100 == 0:
print("Step",batch_idx, "Loss", _total_loss)
plot(loss_list, _predictions_series, batchX, batchY)
plt.ioff()
plt.show()
Image(url= "https://cdn-images-1.medium.com/max/1600/1*uKuUKp_m55zAPCzaIemucA.png")
Image(url= "https://cdn-images-1.medium.com/max/1600/1*ytquMdmGMJo0-3kxMCi1Gg.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The figure below shows the input data-matrix, and the current batch batchX_placeholder
Step2: As you can see in the picture below that is done by unpacking the columns (axis = 1) of the batch into a Python list. The RNN will simultaneously be training on different parts in the time-series; steps 4 to 6, 16 to 18 and 28 to 30 in the current batch-example. The reason for using the variable names “plural”_”series” is to emphasize that the variable is a list that represent a time-series with multiple entries at each step.
Step3: The fact that the training is done on three places simultaneously in our time-series, requires us to save three instances of states when propagating forward. That has already been accounted for, as you see that the init_state placeholder has batch_size rows.
Step4: Notice the concatenation on line 6: what we actually want to calculate is the sum of two affine transforms, current_input * Wa + current_state * Wb, shown in the figure below. By concatenating those two tensors you only need one matrix multiplication (a numpy sketch of this appears after this step list). The addition of the bias b is broadcast across all samples in the batch.
Step5: You may wonder what the variable name truncated_backprop_length is supposed to mean. When an RNN is trained, it is actually treated as a deep neural network with reoccurring weights in every layer. These layers will not be unrolled to the beginning of time, that would be too computationally expensive, and are therefore truncated at a limited number of time-steps. In our sample schematics above, the error is backpropagated three steps in our batch
Step6: The last line adds the training functionality; TensorFlow will perform back-propagation for us automatically — the computation graph is executed once for each mini-batch and the network-weights are updated incrementally.
Step7: There is a visualization function so we can se what’s going on in the network as we train. It will plot the loss over the time, show training input, training output and the current predictions by the network on different sample series in a training batch.
Step8: You can see that we are moving truncated_backprop_length steps forward on each iteration (line 15–19), but it is possible to have different strides. This subject is further elaborated in this article. The downside with doing this is that truncated_backprop_length needs to be significantly larger than the time dependencies (three steps in our case) in order to encapsulate the relevant training data. Otherwise there might be a lot of “misses”, as you can see in the figure below.
Step9: Time series of squares; the elevated black square symbolizes an echo-output, which is activated three steps from the echo input (black square). The sliding batch window is also striding three steps at each run, which in our sample case means that no batch will encapsulate the dependency, so it cannot train.
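A standalone numpy sketch of the concatenation trick from Step4 (the shapes and the weight names Wa, Wb are illustrative, not taken from the tutorial):
import numpy as np
batch, in_dim, state_dim = 5, 1, 4
x = np.random.rand(batch, in_dim)
s = np.random.rand(batch, state_dim)
Wa = np.random.rand(in_dim, state_dim)
Wb = np.random.rand(state_dim, state_dim)
W_stacked = np.vstack([Wa, Wb])  # shape (in_dim + state_dim, state_dim)
# one matmul on the concatenated input equals the sum of the two affine transforms
assert np.allclose(np.concatenate([x, s], axis=1) @ W_stacked, x @ Wa + s @ Wb)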
|
3,799
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'sandbox-1', 'ocnbgchem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Type
Step7: 1.4. Elemental Stoichiometry
Step8: 1.5. Elemental Stoichiometry Details
Step9: 1.6. Prognostic Variables
Step10: 1.7. Diagnostic Variables
Step11: 1.8. Damping
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Step13: 2.2. Timestep If Not From Ocean
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Step15: 3.2. Timestep If Not From Ocean
Step16: 4. Key Properties --> Transport Scheme
Step17: 4.2. Scheme
Step18: 4.3. Use Different Scheme
Step19: 5. Key Properties --> Boundary Forcing
Step20: 5.2. River Input
Step21: 5.3. Sediments From Boundary Conditions
Step22: 5.4. Sediments From Explicit Model
Step23: 6. Key Properties --> Gas Exchange
Step24: 6.2. CO2 Exchange Type
Step25: 6.3. O2 Exchange Present
Step26: 6.4. O2 Exchange Type
Step27: 6.5. DMS Exchange Present
Step28: 6.6. DMS Exchange Type
Step29: 6.7. N2 Exchange Present
Step30: 6.8. N2 Exchange Type
Step31: 6.9. N2O Exchange Present
Step32: 6.10. N2O Exchange Type
Step33: 6.11. CFC11 Exchange Present
Step34: 6.12. CFC11 Exchange Type
Step35: 6.13. CFC12 Exchange Present
Step36: 6.14. CFC12 Exchange Type
Step37: 6.15. SF6 Exchange Present
Step38: 6.16. SF6 Exchange Type
Step39: 6.17. 13CO2 Exchange Present
Step40: 6.18. 13CO2 Exchange Type
Step41: 6.19. 14CO2 Exchange Present
Step42: 6.20. 14CO2 Exchange Type
Step43: 6.21. Other Gases
Step44: 7. Key Properties --> Carbon Chemistry
Step45: 7.2. PH Scale
Step46: 7.3. Constants If Not OMIP
Step47: 8. Tracers
Step48: 8.2. Sulfur Cycle Present
Step49: 8.3. Nutrients Present
Step50: 8.4. Nitrous Species If N
Step51: 8.5. Nitrous Processes If N
Step52: 9. Tracers --> Ecosystem
Step53: 9.2. Upper Trophic Levels Treatment
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Step55: 10.2. Pft
Step56: 10.3. Size Classes
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Step58: 11.2. Size Classes
Step59: 12. Tracers --> Disolved Organic Matter
Step60: 12.2. Lability
Step61: 13. Tracers --> Particules
Step62: 13.2. Types If Prognostic
Step63: 13.3. Size If Prognostic
Step64: 13.4. Size If Discrete
Step65: 13.5. Sinking Speed If Prognostic
Step66: 14. Tracers --> Dic Alkalinity
Step67: 14.2. Abiotic Carbon
Step68: 14.3. Alkalinity
|