| Unnamed: 0 (int64, 0 – 15.9k) | cleaned_code (string, 67 – 124k chars, ⌀ allowed) | cleaned_prompt (string, 168 – 30.3k chars, ⌀ allowed) |
|---|---|---|
4,700
|
<ASSISTANT_TASK:>
Python Code:
from jupyterthemes import get_themes
from jupyterthemes.stylefx import set_nb_theme
themes = get_themes()
set_nb_theme(themes[1])
%load_ext watermark
%watermark -a 'Ethen' -d -t -v -p jupyterthemes
# adjacency list can be efficiently represented as
# a python dictionary, where the nodes are the keys
# and the nodes that they are connected to are stored
# as a list of values
graph = {'A': ['B', 'C', 'E'],
         'B': ['A', 'D', 'E'],
         'C': ['A', 'F', 'G'],
         'D': ['B'],
         'E': ['A', 'B', 'D'],
         'F': ['C'],
         'G': ['C']}
from collections import deque
def bfs(graph, start):
    """Graph traversal using Breadth First Search."""
    # keep track of all visited nodes
    visited = set()
    # keep track of nodes to be checked
    queue = deque([start])
    while queue:
        node = queue.popleft()
        if node not in visited:
            visited.add(node)
            neighbors = graph[node]
            for neighbor in neighbors:
                queue.append(neighbor)
    return visited
start = 'A'
bfs(graph, start)
def dfs(graph, start):
    """Graph traversal using Depth First Search."""
    visited = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.add(node)
            neighbors = graph[node]
            for neighbor in neighbors:
                stack.append(neighbor)
    return visited
start = 'A'
dfs(graph, start)
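# A small added sketch (same `graph` dict as above): bfs/dfs return an
# unordered set of visited nodes, so a natural extension is to record the
# order in which BFS actually visits the nodes.
def bfs_order(graph, start):
    visited = {start}
    order = [start]
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for neighbor in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                order.append(neighbor)
                queue.append(neighbor)
    return order

bfs_order(graph, 'A')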
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Tree Data Structure
Step4: For depth first search, the list of actions to perform upon each visit to a node
|
4,701
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Note: this notebook assumes a `pokemon` DataFrame is already loaded,
# e.g. pokemon = pd.read_csv('Pokemon.csv') (the file name is an assumption).
sns.lmplot(x='Attack', y='Defense', hue='Legendary', fit_reg=False, markers=['x', 'o'], data=pokemon)
plt.show()
sns.heatmap(
pokemon.loc[:, ['HP', 'Attack', 'Sp. Atk', 'Defense', 'Sp. Def', 'Speed']].corr(),
annot=True
)
plt.show()
import pandas as pd
from pandas.plotting import parallel_coordinates
p = (pokemon[(pokemon['Type 1'].isin(["Psychic", "Fighting"]))]
.loc[:, ['Type 1', 'Attack', 'Sp. Atk', 'Defense', 'Sp. Def']]
)
parallel_coordinates(p, 'Type 1')
plt.show()
sns.swarmplot(x='Generation',y='Defense',hue='Legendary',data = pokemon)
plt.show()
sns.stripplot(x='Generation',y='Defense',hue='Legendary',data = pokemon)
plt.show()
sns.boxplot(x="Generation", y="Total", hue='Legendary', data=pokemon)
plt.show()
sns.violinplot(x='Generation',y='Defense',hue='Legendary',data = pokemon,
split=True, inner="stick", palette="Set3");
plt.show()
sns.barplot(x="Generation", y="Defense", hue="Legendary", data=pokemon)
plt.show()
sns.countplot(x="Generation",hue="Legendary", data=pokemon )
plt.show()
sns.pointplot(x="Generation", y="Defense", hue="Legendary", data=pokemon)
plt.show()
# sns.swarmplot(x='Generation',y='Defense',hue='Legendary',data = pokemon)
sns.factorplot(x="Generation", y="Defense", hue="Legendary", data=pokemon, kind="swarm");
sns.factorplot(x="Generation", y="Defense", hue="Legendary",data=pokemon, kind="box")
sns.factorplot(x="Generation", y="Defense", hue="Legendary",data=pokemon, kind="bar")
sns.factorplot(x="Generation", y="Defense", col="Legendary", data=pokemon, kind="bar")
sns.factorplot(x="Generation", y="Defense", row="Legendary", data=pokemon, kind="bar")
# hue, row and col: three ways to facet the relationship
#sns.factorplot(x="Generation", y="Defense", hue="Legendary", col="time", data=pokemon, kind="bar")
plt.show()
g = sns.FacetGrid(pokemon, col ="Generation", row="Legendary")
g.map(sns.kdeplot, "Attack")
plt.show()
sns.pairplot(pokemon[['HP', 'Attack', 'Defense']])
plt.show()
g = sns.PairGrid(pokemon,
x_vars=["Generation","Legendary"],
y_vars=["Attack","Defense","Sp. Atk", "Sp. Def"],
aspect=.85, size=6)
g.map(sns.violinplot,palette="pastel")
plt.show()
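# Note (an aside, not in the original notebook): in seaborn >= 0.9 the
# `factorplot` calls above were renamed `catplot`, e.g.
# sns.catplot(x="Generation", y="Defense", hue="Legendary", data=pokemon, kind="swarm")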
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A very practical way to organize Seaborn's categorical plots is into three families: those that show every observation at each level of the categorical variable, those that show an abstract representation of each distribution of observations, and those that apply statistical estimation to show a measure of central tendency and its confidence interval:
Step2: 1. Facet Grid 2. Pair Plot
|
4,702
|
<ASSISTANT_TASK:>
Python Code:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.interpolate import cross_section
data = xr.open_dataset(get_test_data('narr_example.nc', False))
data = data.metpy.parse_cf().squeeze()
print(data)
start = (37.0, -105.0)
end = (35.5, -65.0)
cross = cross_section(data, start, end).set_coords(('lat', 'lon'))
print(cross)
cross['Potential_temperature'] = mpcalc.potential_temperature(
cross['isobaric'],
cross['Temperature']
)
cross['Relative_humidity'] = mpcalc.relative_humidity_from_specific_humidity(
cross['isobaric'],
cross['Temperature'],
cross['Specific_humidity']
)
cross['u_wind'] = cross['u_wind'].metpy.convert_units('knots')
cross['v_wind'] = cross['v_wind'].metpy.convert_units('knots')
cross['t_wind'], cross['n_wind'] = mpcalc.cross_section_components(
cross['u_wind'],
cross['v_wind']
)
print(cross)
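# Optional sanity check (a sketch; assumes mpcalc.wind_speed, present in
# recent MetPy releases): the tangential/normal decomposition should
# preserve the total wind speed.
speed_uv = mpcalc.wind_speed(cross['u_wind'], cross['v_wind'])
speed_tn = mpcalc.wind_speed(cross['t_wind'], cross['n_wind'])
print(np.allclose(speed_uv, speed_tn))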
# Define the figure object and primary axes
fig = plt.figure(1, figsize=(16., 9.))
ax = plt.axes()
# Plot RH using contourf
rh_contour = ax.contourf(cross['lon'], cross['isobaric'], cross['Relative_humidity'],
levels=np.arange(0, 1.05, .05), cmap='YlGnBu')
rh_colorbar = fig.colorbar(rh_contour)
# Plot potential temperature using contour, with some custom labeling
theta_contour = ax.contour(cross['lon'], cross['isobaric'], cross['Potential_temperature'],
levels=np.arange(250, 450, 5), colors='k', linewidths=2)
theta_contour.clabel(theta_contour.levels[1::2], fontsize=8, colors='k', inline=1,
inline_spacing=8, fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot winds using the axes interface directly, with some custom indexing to make the barbs
# less crowded
wind_slc_vert = list(range(0, 19, 2)) + list(range(19, 29))
wind_slc_horz = slice(5, 100, 5)
ax.barbs(cross['lon'][wind_slc_horz], cross['isobaric'][wind_slc_vert],
cross['t_wind'][wind_slc_vert, wind_slc_horz],
cross['n_wind'][wind_slc_vert, wind_slc_horz], color='k')
# Adjust the y-axis to be logarithmic
ax.set_yscale('symlog')
ax.set_yticklabels(np.arange(1000, 50, -100))
ax.set_ylim(cross['isobaric'].max(), cross['isobaric'].min())
ax.set_yticks(np.arange(1000, 50, -100))
# Define the CRS and inset axes
data_crs = data['Geopotential_height'].metpy.cartopy_crs
ax_inset = fig.add_axes([0.125, 0.665, 0.25, 0.25], projection=data_crs)
# Plot geopotential height at 500 hPa using xarray's contour wrapper
ax_inset.contour(data['x'], data['y'], data['Geopotential_height'].sel(isobaric=500.),
levels=np.arange(5100, 6000, 60), cmap='inferno')
# Plot the path of the cross section
endpoints = data_crs.transform_points(ccrs.Geodetic(),
*np.vstack([start, end]).transpose()[::-1])
ax_inset.scatter(endpoints[:, 0], endpoints[:, 1], c='k', zorder=2)
ax_inset.plot(cross['x'], cross['y'], c='k', zorder=2)
# Add geographic features
ax_inset.coastlines()
ax_inset.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='k', alpha=0.2, zorder=0)
# Set the titles and axes labels
ax_inset.set_title('')
ax.set_title(f'NARR Cross-Section \u2013 {start} to {end} \u2013 '
f'Valid: {cross["time"].dt.strftime("%Y-%m-%d %H:%MZ").item()}\n'
'Potential Temperature (K), Tangential/Normal Winds (knots), Relative Humidity '
'(dimensionless)\nInset: Cross-Section Path and 500 hPa Geopotential Height')
ax.set_ylabel('Pressure (hPa)')
ax.set_xlabel('Longitude (degrees east)')
rh_colorbar.set_label('Relative Humidity (dimensionless)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the data
Step2: Define start and end points
Step3: Get the cross section, and convert lat/lon to supplementary coordinates
Step4: For this example, we will be plotting potential temperature, relative humidity, and
Step5: Now, we can make the plot.
|
4,703
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
print(phoebe.multiprocessing_get_nprocs())
phoebe.multiprocessing_off()
print(phoebe.multiprocessing_get_nprocs())
phoebe.multiprocessing_on()
print(phoebe.multiprocessing_get_nprocs())
phoebe.multiprocessing_set_nprocs(2)
print(phoebe.multiprocessing_get_nprocs())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Accessing/Changing Multiprocessing Settings
Step2: To disable multiprocessing, we can call phoebe.multiprocessing_off.
Step3: To re-enable multiprocessing with all available CPUs on the machine, we can call phoebe.multiprocessing_on.
Step4: Or to manually set the number of processors to use, we can call phoebe.multiprocessing_set_nprocs.
|
4,704
|
<ASSISTANT_TASK:>
Python Code:
import mne
import os.path as op
# Read the info object from an example recording
info = mne.io.read_info(
op.join(mne.datasets.sample.data_path(), 'MEG', 'sample',
'sample_audvis_raw.fif'), verbose=False)
print('Keys in info dictionary:\n', info.keys())
print(info['sfreq'], 'Hz')
print(info['chs'][0])
channel_indices_two = mne.pick_channels(
info['ch_names'], ['MEG 0312', 'EEG 005'])
channel_indices_meg_re = mne.pick_channels_regexp(info['ch_names'], 'MEG *')
channel_indices_meg = mne.pick_types(info, meg=True) # MEG only
channel_indices_eeg = mne.pick_types(info, meg=False, eeg=True) # EEG only
channel_indices_grad_eeg = mne.pick_types(info, meg='grad', eeg=True)
channel_indices_by_type = mne.io.pick.channel_indices_by_type(info)
print('The first three magnetometers:', channel_indices_by_type['mag'][:3])
# Channel type of a specific channel
channel_type = mne.io.pick.channel_type(info, 75)
print('Channel #75 is of type:', channel_type)
meg_channels = mne.pick_types(info, meg=True)[:10]
channel_types = [mne.io.pick.channel_type(info, ch) for ch in meg_channels]
print('First 10 MEG channels are of type:\n', channel_types)
# Only keep EEG channels
eeg_indices = mne.pick_types(info, meg=False, eeg=True)
reduced_info = mne.pick_info(info, eeg_indices)
print(reduced_info)
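# Quick verification (a small addition): the reduced info should now contain
# only the EEG channels.
print('Channels remaining:', len(reduced_info['ch_names']))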
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2: List all the fields in the info object
Step3: Obtain the sampling rate of the data
Step4: List all information about the first data channel
Step5: Obtaining subsets of channels
Step6: Get channel indices by regular expression
Step7: Channel types
Step8: MEG gradiometers and EEG channels
Step9: Get a dictionary of channel indices, grouped by channel type
Step10: Obtaining information about channels
Step11: Channel types of a collection of channels
Step12: Dropping channels from an info structure
|
4,705
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.3,<2.4"
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger(clevel='INFO')
b = phoebe.default_binary()
b['incl@orbit'] = 56.789
print(b.save('test.phoebe'))
!head -n 30 test.phoebe
b2 = phoebe.Bundle.open('test.phoebe')
print(b2.get_value('incl@orbit'))
b = phoebe.Bundle.from_legacy('legacy.phoebe')
b.export_legacy('legacy_export.phoebe')
!head -n 30 legacy_export.phoebe
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Saving a Bundle
Step2: To save the Bundle to a file, we can call the save method of the Bundle and pass a filename.
Step3: We can now inspect the contents of the created file.
Step4: Loading a Bundle
Step5: Just to prove this worked, we can check to make sure we retained the changed value of inclination.
Step6: Support for Other Codes
Step7: Exporting to a PHOEBE Legacy file is also possible (although note that some parameters don't translate exactly or are not supported in PHOEBE Legacy), via b.export_legacy.
Step8: For the parameters that could not be directly translated, you should see a warning message (if you have warning messages enabled in your logger).
|
4,706
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]
    data.loc[:, each] = (data[each] - mean)/std
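# The stored (mean, std) pairs let us invert the standardization later,
# via data[each]*std + mean; this is used below when plotting predictions.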
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights
        self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
                                                        (self.hidden_nodes, self.input_nodes))
        self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
                                                         (self.output_nodes, self.hidden_nodes))
        self.lr = learning_rate

        # Activation function is the sigmoid function
        self.activation_function = lambda x: 1. / (1. + np.exp(-x))

    def train(self, inputs_list, targets_list):
        # Convert inputs list to 2d array
        inputs = np.array(inputs_list, ndmin=2).T
        targets = np.array(targets_list, ndmin=2).T

        ### Forward pass ###
        # Hidden layer
        hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        # Output layer (identity activation, since this is a regression problem)
        final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
        final_outputs = final_inputs

        ### Backward pass ###
        # Output error
        output_errors = targets - final_outputs
        # Backpropagated error
        hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) * hidden_outputs * (1 - hidden_outputs)
        hidden_grad = np.dot(hidden_errors, inputs.T)
        # Update the weights
        self.weights_hidden_to_output += self.lr * np.dot(output_errors, hidden_outputs.T)
        self.weights_input_to_hidden += self.lr * hidden_grad

    def run(self, inputs_list):
        # Run a forward pass through the network
        inputs = np.array(inputs_list, ndmin=2).T

        # Hidden layer
        hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        # Output layer (identity activation, as in train)
        final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
        final_outputs = final_inputs
        return final_outputs
def MSE(y, Y):
    return np.mean((y-Y)**2)
import sys
### Set the hyperparameters here ###
epochs = 1000
learning_rate = 0.1
hidden_nodes = 10
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    for record, target in zip(train_features.ix[batch].values,
                              train_targets.ix[batch]['cnt']):
        network.train(record, target)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
    sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328, -0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, 0.39775194, -0.29887597],
                                              [-0.20185996, 0.50074398, 0.19962801]])))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: Thinking about your results
|
4,707
|
<ASSISTANT_TASK:>
Python Code:
import rebound
sim = rebound.Simulation()
sim.add(m=1.)
print(sim.particles[0])
sim.add(m=1e-3, x=1., vy=1.)
sim.add(m=1e-3, a=2., e=0.1)
sim.status()
sim.integrator = "whfast"
sim.dt = 1e-3
sim.integrate(6.28318530717959, exact_finish_time=0) # 6.28318530717959 is 2*pi
sim.status()
particles = sim.particles
for p in particles:
    print(p.x, p.y, p.vx, p.vy)
import numpy as np
torb = 2.*np.pi
Noutputs = 100
times = np.linspace(torb, 2.*torb, Noutputs)
x = np.zeros(Noutputs)
y = np.zeros(Noutputs)
for i,time in enumerate(times):
    sim.integrate(time, exact_finish_time=0)
    x[i] = particles[1].x
    y[i] = particles[1].y
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5,5))
ax = plt.subplot(111)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
plt.plot(x, y);
Noutputs = 1000
times = np.linspace(2.*torb, 20.*torb, Noutputs)
x = np.zeros(Noutputs)
y = np.zeros(Noutputs)
for i,time in enumerate(times):
    sim.integrate(time, exact_finish_time=0)
    x[i] = particles[1].x
    y[i] = particles[1].y
fig = plt.figure(figsize=(5,5))
ax = plt.subplot(111)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
plt.plot(x, y);
sim.move_to_com()
times = np.linspace(20.*torb, 1000.*torb, Noutputs)
for i,time in enumerate(times):
    sim.integrate(time, exact_finish_time=0)
    x[i] = particles[1].x
    y[i] = particles[1].y
fig = plt.figure(figsize=(5,5))
ax = plt.subplot(111)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
plt.scatter(x, y, marker='.', color='k', s=1.2);
times = np.linspace(1000.*torb, 9000.*torb, Noutputs)
a = np.zeros(Noutputs)
e = np.zeros(Noutputs)
for i,time in enumerate(times):
    sim.integrate(time, exact_finish_time=0)
    orbits = sim.calculate_orbits()
    a[i] = orbits[1].a
    e[i] = orbits[1].e
fig = plt.figure(figsize=(15,5))
ax = plt.subplot(121)
ax.set_xlabel("time")
ax.set_ylabel("semi-major axis")
plt.plot(times, a);
ax = plt.subplot(122)
ax.set_xlabel("time")
ax.set_ylabel("eccentricity")
plt.plot(times, e);
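# Optional check (a sketch; REBOUND exposes sim.calculate_energy() for the
# total energy of the system): a symplectic integrator like WHFast should
# show bounded, not secularly growing, energy error.
print('Total energy:', sim.calculate_energy())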
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we create a REBOUND simulation instance. This object encapsulated all the variables and functions that REBOUND has to offer.
Step2: Now, we can add particles. We'll work in units in which $G=1$ (see Units.ipynb for using different units). The first particle we add is the central object. We place it at rest at the origin and use the convention of setting the mass of the central object $M_*$ to 1
Step3: Let's look at the particle we just added
Step4: The output tells us that the mass of the particle is 1 and all coordinates are zero.
Step5: Instead of initializing the particle with Cartesian coordinates, we can also use orbital elements. By default, REBOUND will use Jacobi coordinates, i.e. REBOUND assumes the orbital elements describe the particle's orbit around the centre of mass of all particles added previously. Our second planet will have a mass of $10^{-3}$, a semimajor axis of $a=2$ and an eccentricity of $e=0.1$ (note that you shouldn't change G after adding particles this way, see Units.ipynb)
Step6: Now that we have added two more particles, let's have a quick look at what's in this simulation by using
Step7: You can see that REBOUND used the ias15 integrator as a default. Next, let's tell REBOUND that we want to use WHFast instead. We'll also set the timestep. In our system of units, an orbit at $a=1$ has an orbital period of $T_{\rm orb} =2\pi \sqrt{\frac{GM}{a}}= 2\pi$. So a reasonable timestep to start with would be $dt=10^{-3}$ (see Rein & Tamayo 2015 for some discussion on timestep choices).
Step8: whfast refers to the 2nd order symplectic integrator WHFast described by Rein & Tamayo (2015). By default, no symplectic correctors are used, but they can be easily turned on (see Advanced Settings for WHFast).
Step9: Once again, let's look at what REBOUND's status is
Step10: As you can see the time has advanced to $t=2\pi$ and the positions and velocities of all particles have changed. If you want to post-process the particle data, you can access it in the following way
Step11: The particles object is an array of pointers to the particles. This means you can call particles = sim.particles before the integration and the contents of particles will be updated after the integration. If you add or remove particles, you'll need to call sim.particles again.
Step12: Next, we'll step through the simulation. Rebound will integrate up to time. Depending on the timestep, it might overshoot slightly. If you want to have the outputs at exactly the time you specify, you can set the exact_finish_time=1 flag in the integrate function (or omit it altogether, 1 is the default). However, note that changing the timestep in a symplectic integrator could have negative impacts on its properties.
Step13: Let's plot the orbit using matplotlib.
Step14: Hurray! It worked. The orbit looks like it should, it's an almost perfect circle. There are small perturbations though, induced by the outer planet. Let's integrate a bit longer to see them.
Step15: Oops! This doesn't look like what we expected to see (small perturbations to an almost circular orbit). What you see here is the barycenter slowly drifting. Some integration packages require that the simulation be carried out in a particular frame, but WHFast provides extra flexibility by working in any inertial frame. If you recall how we added the particles, the Sun was at the origin and at rest, and then we added the planets. This means that the center of mass, or barycenter, will have a small velocity, which results in the observed drift. There are multiple ways we can get the plot we want.
Step16: So let's try this again. Let's integrate for a bit longer this time.
Step17: That looks much more like it. Let us finally plot the orbital elements as a function of time.
|
4,708
|
<ASSISTANT_TASK:>
Python Code:
!pip install dm-acme
!pip install dm-acme[reverb]
!pip install dm-acme[tf]
!pip install dm-sonnet
#@title Edit and run
mjkey = """
REPLACE THIS LINE WITH YOUR MUJOCO LICENSE KEY
""".strip()
mujoco_dir = "$HOME/.mujoco"
# Install OpenGL deps
!apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx libosmesa6 libglew2.0
# Fetch MuJoCo binaries from Roboti
!wget -q https://www.roboti.us/download/mujoco200_linux.zip -O mujoco.zip
!unzip -o -q mujoco.zip -d "$mujoco_dir"
# Copy over MuJoCo license
!echo "$mjkey" > "$mujoco_dir/mjkey.txt"
# Configure dm_control to use the OSMesa rendering backend
%env MUJOCO_GL=osmesa
# Install dm_control
!pip install dm_control
#@title Add your MuJoCo License and run
mjkey = """
""".strip()
mujoco_dir = "$HOME/.mujoco"
# Install OpenGL dependencies
!apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx libosmesa6 libglew2.0
# Get MuJoCo binaries
!wget -q https://www.roboti.us/download/mujoco200_linux.zip -O mujoco.zip
!unzip -o -q mujoco.zip -d "$mujoco_dir"
# Copy over MuJoCo license
!echo "$mjkey" > "$mujoco_dir/mjkey.txt"
# Configure dm_control to use the OSMesa rendering backend
%env MUJOCO_GL=osmesa
# Install dm_control, including extra dependencies needed for the locomotion
# mazes.
!pip install dm_control[locomotion_mazes]
!git clone https://github.com/google-research/realworldrl_suite.git
!pip install realworldrl_suite/
!git clone https://github.com/deepmind/deepmind-research.git
%cd deepmind-research
import collections
import copy
from typing import Mapping, Sequence
import acme
from acme import specs
from acme.agents.tf import actors
from acme.agents.tf import d4pg
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import loggers
from acme.wrappers import single_precision
import numpy as np
import realworldrl_suite.environments as rwrl_envs
from reverb import replay_sample
import six
from rl_unplugged import rwrl
import sonnet as snt
import tensorflow as tf
domain_name = 'cartpole' #@param
task_name = 'swingup' #@param
difficulty = 'easy' #@param
combined_challenge = 'easy' #@param
combined_challenge_str = str(combined_challenge).lower()
tmp_path = '/tmp/rwrl'
gs_path = f'gs://rl_unplugged/rwrl'
data_path = (f'combined_challenge_{combined_challenge_str}/{domain_name}/'
f'{task_name}/offline_rl_challenge_{difficulty}')
!mkdir -p {tmp_path}/{data_path}
!gsutil cp -r {gs_path}/{data_path}/* {tmp_path}/{data_path}
num_shards_str, = !ls {tmp_path}/{data_path}/* | wc -l
num_shards = int(num_shards_str)
#@title Auxiliary functions
def flatten_observation(observation):
    """Flattens multiple observation arrays into a single tensor.

    Args:
      observation: A mutable mapping from observation names to tensors.

    Returns:
      A flattened and concatenated observation array.

    Raises:
      ValueError: If `observation` is not a `collections.MutableMapping`.
    """
    if not isinstance(observation, collections.MutableMapping):
        raise ValueError('Can only flatten dict-like observations.')
    if isinstance(observation, collections.OrderedDict):
        keys = six.iterkeys(observation)
    else:
        # Keep a consistent ordering for other mappings.
        keys = sorted(six.iterkeys(observation))
    observation_arrays = [tf.reshape(observation[key], [-1]) for key in keys]
    return tf.concat(observation_arrays, 0)
def preprocess_fn(sample):
    o_tm1, a_tm1, r_t, d_t, o_t = sample.data[:5]
    o_tm1 = flatten_observation(o_tm1)
    o_t = flatten_observation(o_t)
    return replay_sample.ReplaySample(
        info=sample.info, data=(o_tm1, a_tm1, r_t, d_t, o_t))
batch_size = 10 #@param
environment = rwrl_envs.load(
domain_name=domain_name,
task_name=f'realworld_{task_name}',
environment_kwargs=dict(log_safety_vars=False, flat_observation=True),
combined_challenge=combined_challenge)
environment = single_precision.SinglePrecisionWrapper(environment)
environment_spec = specs.make_environment_spec(environment)
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
dataset = rwrl.dataset(
tmp_path,
combined_challenge=combined_challenge_str,
domain=domain_name,
task=task_name,
difficulty=difficulty,
num_shards=num_shards,
shuffle_buffer_size=10)
dataset = dataset.map(preprocess_fn).batch(batch_size)
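# Peek at one preprocessed batch (a small added sketch using the standard
# tf.data API) to confirm the flattened observation shape.
for sample in dataset.take(1):
    print(sample.data[0].shape)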
#@title Auxiliary functions
def make_networks(
    action_spec: specs.BoundedArray,
    hidden_size: int = 1024,
    num_blocks: int = 4,
    num_mixtures: int = 5,
    vmin: float = -150.,
    vmax: float = 150.,
    num_atoms: int = 51,
):
    """Creates networks used by the agent."""
    num_dimensions = np.prod(action_spec.shape, dtype=int)

    policy_network = snt.Sequential([
        networks.LayerNormAndResidualMLP(
            hidden_size=hidden_size, num_blocks=num_blocks),
        # Converts the policy output into the same shape as the action spec.
        snt.Linear(num_dimensions),
        # Note that TanhToSpec applies tanh to the input.
        networks.TanhToSpec(action_spec)
    ])
    # The multiplexer concatenates the (maybe transformed) observations/actions.
    critic_network = snt.Sequential([
        networks.CriticMultiplexer(
            critic_network=networks.LayerNormAndResidualMLP(
                hidden_size=hidden_size, num_blocks=num_blocks),
            observation_network=tf2_utils.batch_concat),
        networks.DiscreteValuedHead(vmin, vmax, num_atoms)
    ])
    return {
        'policy': policy_network,
        'critic': critic_network,
    }
# Create the networks to optimize.
online_networks = make_networks(act_spec)
target_networks = copy.deepcopy(online_networks)
# Create variables.
tf2_utils.create_variables(online_networks['policy'], [obs_spec])
tf2_utils.create_variables(online_networks['critic'], [obs_spec, act_spec])
tf2_utils.create_variables(target_networks['policy'], [obs_spec])
tf2_utils.create_variables(target_networks['critic'], [obs_spec, act_spec])
# The learner updates the parameters (and initializes them).
learner = d4pg.D4PGLearner(
policy_network=online_networks['policy'],
critic_network=online_networks['critic'],
target_policy_network=target_networks['policy'],
target_critic_network=target_networks['critic'],
dataset=dataset,
discount=0.99,
target_update_period=100)
for _ in range(100):
    learner.step()
# Create a logger.
logger = loggers.TerminalLogger(label='evaluation', time_delta=1.)
# Create an environment loop.
loop = acme.EnvironmentLoop(
environment=environment,
actor=actors.DeprecatedFeedForwardActor(online_networks['policy']),
logger=logger)
loop.run(5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: MuJoCo
Step4: Machine-locked MuJoCo license.
Step5: RWRL
Step6: RL Unplugged
Step7: Imports
Step8: Data
Step10: Dataset and environment
Step12: D4PG learner
Step13: Training loop
Step14: Evaluation
|
4,709
|
<ASSISTANT_TASK:>
Python Code:
text1 = "Ethics are built right into the ideals and objectives of the United Nations "
len(text1) # The length of text1
text2 = text1.split(' ') # Return a list of the words in text2, separating by ' '.
len(text2)
text2
[w for w in text2 if len(w) > 3] # Words that are greater than 3 letters long in text2
[w for w in text2 if w.istitle()] # Capitalized words in text2
[w for w in text2 if w.endswith('s')] # Words in text2 that end in 's'
text3 = 'To be or not to be'
text4 = text3.split(' ')
len(text4)
len(set(text4))
set(text4)
len(set([w.lower() for w in text4])) # .lower converts the string to lowercase.
set([w.lower() for w in text4])
text5 = '"Ethics are built right into the ideals and objectives of the United Nations" \
#UNSG @ NY Society for Ethical Culture bit.ly/2guVelr'
text6 = text5.split(' ')
text6
[w for w in text6 if w.startswith('#')]
[w for w in text6 if w.startswith('@')]
text7 = '@UN @UN_Women "Ethics are built right into the ideals and objectives of the United Nations" \
#UNSG @ NY Society for Ethical Culture bit.ly/2guVelr'
text8 = text7.split(' ')
import re # import re - a module that provides support for regular expressions
[w for w in text8 if re.search('@[A-Za-z0-9_]+', w)]
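# An equivalent, more compact pattern: \w matches [A-Za-z0-9_], and the raw
# string (r'...') avoids any backslash escaping issues.
[w for w in text8 if re.search(r'@\w+', w)]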
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br>
Step2: <br>
Step3: Processing free-text
Step4: <br>
Step5: <br>
Step6: <br>
|
4,710
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import find_events, fit_dipole
from mne.datasets import fetch_phantom
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif
print(__doc__)
data_path = bst_phantom_elekta.data_path(verbose=True)
raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)
events = find_events(raw, 'STI201')
raw.plot(events=events)
raw.info['bads'] = ['MEG1933', 'MEG2421']
raw.plot_psd(tmax=30., average=False)
raw.plot(events=events)
tmin, tmax = -0.1, 0.1
bmax = -0.05 # Avoid capture filter ringing into baseline
event_id = list(range(1, 33))
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, bmax),
preload=False)
epochs['1'].average().plot(time_unit='s')
subjects_dir = data_path
fetch_phantom('otaniemi', subjects_dir=subjects_dir)
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08)
subject = 'phantom_otaniemi'
trans = mne.transforms.Transform('head', 'mri', np.eye(4))
mne.viz.plot_alignment(
epochs.info, subject=subject, show_axes=True, bem=sphere, dig=True,
surfaces=('head-dense', 'inner_skull'), trans=trans, mri_fiducials=True,
subjects_dir=subjects_dir)
# here we can get away with using method='oas' for speed (faster than "shrunk")
# but in general "shrunk" is usually better
cov = mne.compute_covariance(epochs, tmax=bmax)
mne.viz.plot_evoked_white(epochs['1'].average(), cov)
data = []
t_peak = 0.036 # true for Elekta phantom
for ii in event_id:
    # Avoid the first and last trials -- can contain dipole-switching artifacts
    evoked = epochs[str(ii)][1:-1].average().crop(t_peak, t_peak)
    data.append(evoked.data[:, 0])
evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
del epochs
dip, residual = fit_dipole(evoked, cov, sphere, n_jobs=1)
fig, axes = plt.subplots(2, 1)
evoked.plot(axes=axes)
for ax in axes:
    for text in list(ax.texts):
        text.remove()
    for line in ax.lines:
        line.set_color('#98df81')
residual.plot(axes=axes)
actual_pos, actual_ori = mne.dipole.get_phantom_dipoles()
actual_amp = 100. # nAm
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(6, 7))
diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
print('mean(position error) = %0.1f mm' % (np.mean(diffs),))
ax1.bar(event_id, diffs)
ax1.set_xlabel('Dipole index')
ax1.set_ylabel('Loc. error (mm)')
angles = np.rad2deg(np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1))))
print(u'mean(angle error) = %0.1f°' % (np.mean(angles),))
ax2.bar(event_id, angles)
ax2.set_xlabel('Dipole index')
ax2.set_ylabel(u'Angle error (°)')
amps = actual_amp - dip.amplitude / 1e-9
print('mean(abs amplitude error) = %0.1f nAm' % (np.mean(np.abs(amps)),))
ax3.bar(event_id, amps)
ax3.set_xlabel('Dipole index')
ax3.set_ylabel('Amplitude error (nAm)')
fig.tight_layout()
plt.show()
actual_amp = np.ones(len(dip)) # misc amp to create Dipole instance
actual_gof = np.ones(len(dip)) # misc GOF to create Dipole instance
dip_true = \
mne.Dipole(dip.times, actual_pos, actual_amp, actual_ori, actual_gof)
fig = mne.viz.plot_alignment(
evoked.info, trans, subject, bem=sphere, surfaces={'head-dense': 0.2},
coord_frame='head', meg='helmet', show_axes=True,
subjects_dir=subjects_dir)
# Plot the position and the orientation of the actual dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip_true, mode='arrow',
subject=subject, color=(0., 0., 0.),
fig=fig)
# Plot the position and the orientation of the estimated dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip, mode='arrow', subject=subject,
color=(0.2, 1., 0.5), fig=fig)
mne.viz.set_3d_view(figure=fig, azimuth=70, elevation=80, distance=0.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
Step2: Data channel array consisted of 204 MEG planar gradiometers,
Step3: The data have strong line frequency (60 Hz and harmonics) and cHPI coil
Step4: Our phantom produces sinusoidal bursts at 20 Hz
Step5: Now we epoch our data, average it, and look at the first dipole response.
Step6: Let's use a sphere head geometry model <eeg_sphere_model>
Step7: Let's do some dipole fits. We first compute the noise covariance,
Step8: Do a quick visualization of how much variance we explained, putting the
Step9: Now we can compare to the actual locations, taking the difference in mm
Step10: Let's plot the positions and the orientations of the actual and the estimated
|
4,711
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import display, HTML
from string import Template
HTML('<script src="//d3js.org/d3.v3.min.js" charset="utf-8"></script>')
css_text2 = '''
#main { float: left; width: 750px;}
#sidebar { float: right; width: 100px;}
#sequence { width: 600px; height: 70px;}
#legend { padding: 10px 0 0 3px;}
#sequence text, #legend text { font-weight: 400; fill: #000000; font-size: 0.75em;}
#graph-div2 { position: relative;}
#graph-div2 { stroke: #fff;}
#explanation { position: absolute; top: 330px; left: 405px; width: 140px; text-align: center; color: #666; z-index: -1;}
#percentage { font-size: 2.3em;}
'''
with open('interactive_circle_pd.js', 'r') as myfile:
data=myfile.read()
js_text_template2 = Template(data)
html_template = Template('''
<style> $css_text </style>
<div id="sequence"></div>
<div id="graph-div2"></div>
<div id="explanation" style="visibility: hidden;">
<span id="percentage"></span><br/>
of patients meet this criteria
</div>
</div>
<script> $js_text </script>
''');
js_text2 = js_text_template2.substitute({'graphdiv': 'graph-div2'});
HTML(html_template.substitute({'css_text': css_text2, 'js_text': js_text2}))
import pandas as pd
prim_cont = pd.read_csv(open("./python_scripts/09_prim_diag_table.csv","r"))
prim_cont
#Anxiety Missing Model
import pickle
missings = pickle.load(open("./python_scripts/13-missing_model_validPPSScore.p", "rb"))
missings_consultlocmissing = pickle.load(open("./python_scripts/13-missing_model_missingPPSScore.p", "rb"))
#Anxiety
print(missings.get("anxiety")[0])
print(missings_consultlocmissing.get('anxiety'))
print(missings.get("appetite")[0])
#Caution, should not use - did not converge
print(missings.get("constipation")[0])
print(missings.get("depression")[0])
print(missings.get("drowsiness")[0])
#Caution, should not use - did not converge
print(missings.get("nausea")[0])
print(missings.get("pain")[0])
print(missings.get("shortness")[0])
print(missings.get("tiredness")[0])
print(missings.get("wellbeing")[0])
#first read in the (summary) data
import pandas as pd
summ = pd.read_csv(open("./python_scripts/14_number_of_modsev_symptoms.csv","r"))
#now print those with '1 or more moderate/severe symptoms'
summ[0:2]
#now print those with '2 or more moderate/severe symptoms'
summ[2:4]
#now print those with '3 or more moderate/severe symptoms'
summ[4:6]
import pandas as pd
table = pd.read_csv(open('./python_scripts/11_primarydiagnosis_tables_catv2.csv','r'))
#Anxiety
table[0:5]
#Appetite
table[5:10]
#Constipation
table[10:15]
#Depression
table[15:20]
#Drowsiness
table[20:25]
#Nausea
table[25:30]
#Pain
table[30:35]
#Shortness
table[35:40]
#Tiredness
table[40:45]
#Well Being
table[45:50]
# PPSScore
table[50:51]
import pickle
models = pickle.load(open("./python_scripts/12-model_results.p", "rb"))
#Anxiety
print(models.get("anxiety")[0])
#Anxiety Marginal Effects
print(models.get("anxiety")[1])
#Appetite
print(models.get("appetite")[0])
#Constipation
print(models.get("constipation")[0])
#Depression
print(models.get("depression")[0])
#Drowsiness
print(models.get("drowsiness")[0])
#Nausea
print(models.get("nausea")[0])
#Pain
print(models.get("pain")[0])
#Shortness
print(models.get("shortness")[0])
#Tiredness
print(models.get("tiredness")[0])
#Wellbeing
print(models.get("wellbeing")[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graphic Interpretation
Step2: AIM
Step3: Conclusions
Step4: <a id='num_mod_sev'></a>
Step5: <a id='ua'></a>
Step6: Potential Conlusions
Step7: Conclusions
|
4,712
|
<ASSISTANT_TASK:>
Python Code:
# Evaluate this cell to identifiy your form
from dkrz_forms import form_widgets, form_handler, checks
form_infos = form_widgets.show_selection()
# Evaluate this cell to generate your personal form instance
form_info = form_infos[form_widgets.FORMS.value]
sf = form_handler.init_form(form_info)
form = sf.sub.entity_out.report
form.submission_type = "init" # example: sf.submission_type = "initial_version"
form.cmor = '..' ## options: 'CMOR', 'CDO-CMOR', etc.
form.cmor_compliance_checks = '..' ## please name the tool you used to check your files with respect to CMIP6 compliance
## 'PREPARE' for the CMOR PREPARE checker and "DKRZ" for the DKRZ tool.
form.es_doc = " .. " # 'yes' related esdoc model information is/will be available, 'no' otherwise
form.errata = " .. " # 'yes' if errata information was provided based on the CMIP6 errata mechanism
# fill the following info only in case this form refers to new versions of already published ESGF data
form.errata_id = ".." # the errata id provided by the CMIP6 errata mechanism
form.errata_comment = "..." # any additional information on the reason of this new version, not yet provided
form.uniqueness_of_tracking_id = "..." # example: form.uniqueness_of_tracking_id = "yes"
form.data_dir_1 = " ... "
# uncomment for additional entries ...
# form.data_dir_2 = " ... "
# form.data_dir_3 = " ... "
# ...
form.time_period = "..." # example: sf.time_period = "197901-201412"
# ["time_period_a","time_period_b"] in case of multiple values
form.grid = ".."
form.exclude_variables_list = "..." # example: sf.exclude_variables_list=["bnds", "vertices"]
form.terms_of_use = "..." # has to be "ok"
form.data_path = "..." # example: sf.data_path = "mistral.dkrz.de:/mnt/lustre01/work/bm0021/k204016/CORDEX/archive/"
form.data_information = "..." # ...any info where data can be accessed and transfered to the data center ... "
form.example_file_name = "..." # example: sf.example_file_name = "tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc"
# simple consistency check report for your submission form - not completed
report = checks.check_report(sf,"sub")
checks.display_report(report)
form_handler.save_form(sf,"any comment you want") # add a comment
# evaluate this cell if you want a reference (provided by email)
# (only available if you access this form via the DKRZ hosting service)
form_handler.email_form_info(sf)
#form_handler.email_form_info(sf)
form_handler.form_submission(sf)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: CMOR compliance
Step3: Documentation availability
Step4: Uniqueness of tracking_id and creation_date
Step5: Generic content characterization based on CMIP6 directory structure
Step6: Provide specific additional information for this submission
Step7: Exclude variable list
Step8: CMIP6 terms of use
Step9: Step 2
Step10: Example file name
Step11: Step 3
Step12: Step 4
Step13: Step 5
|
4,713
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]
    data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights
        self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
                                                        (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
                                                         (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Sigmoid activation and its derivative, expressed in terms of the output
        self.activation_function = lambda x : 1 / (1+np.exp(-x))
        self.derivative = lambda x : x * (1 - x)

    def train(self, features, targets):
        ''' Train the network on batch of features and targets.

            Arguments
            ---------
            features: 2D array, each row is one data record, each column is a feature
            targets: 1D array of target values
        '''
        n_records = features.shape[0]
        delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
        delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
        for X, y in zip(features, targets):
            ### Forward pass ###
            hidden_inputs = np.dot(X, self.weights_input_to_hidden)
            hidden_outputs = self.activation_function(hidden_inputs)
            # The output unit is linear (f(x) = x) since this is a regression problem
            final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
            final_outputs = final_inputs

            ### Backward pass ###
            # Output error; the derivative of the identity activation is 1
            error = y - final_outputs
            output_error_term = error

            # Hidden layer's contribution to the error
            hidden_error = np.dot(self.weights_hidden_to_output, output_error_term)
            hidden_error_term = hidden_error * self.derivative(hidden_outputs)

            # Weight step (input to hidden)
            delta_weights_i_h += hidden_error_term * X[:, None]
            # Weight step (hidden to output)
            delta_weights_h_o += output_error_term * hidden_outputs[:, None]

        # Update the weights with the averaged gradient descent step
        self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
        self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records

    def run(self, features):
        ''' Run a forward pass through the network with input features

            Arguments
            ---------
            features: 1D array of feature values
        '''
        hidden_inputs = np.dot(features, self.weights_input_to_hidden)  # signals into hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)

        # Linear output unit, as in train()
        final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
        final_outputs = final_inputs
        return final_outputs
def MSE(y, Y):
    return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [ 0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 100
learning_rate = 0.1
hidden_nodes = 2
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']

    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
4,714
|
<ASSISTANT_TASK:>
Python Code:
# importing
import numpy as np
from collections import Counter
def LZ77_encode(input_sequence, window_length=10):
    '''
    Implementation of LZ77 encoding

    IN: input_sequence ( list or np.array of letters )
    OUT: list of 3-tuples with each tuple being (a,b,x) where a is how far we look back in the sequence, b is the length of the string to copy and x the next letter
    '''
    return_value = [(0, 0, input_sequence[0])]
    k = 1
    while k < len(input_sequence):
        # test all possible look_backs
        best_b = 0
        best_a = 0
        # look back over window of past
        for a_test in range(1, min(k+1, window_length+1)):
            b_test = 0
            while input_sequence[k+b_test] == input_sequence[k-a_test+b_test]:
                b_test += 1
                if k+b_test == len(input_sequence):
                    break
            if b_test > best_b:
                best_b = b_test
                best_a = a_test
        if k+best_b == len(input_sequence):
            return_value.append((best_a, best_b, 'EOF'))
        else:
            return_value.append((best_a, best_b, input_sequence[k+best_b]))
        k = k+best_b+1
    return return_value
def LZ77_decode(input_tuples):
    '''
    Implementation of LZ77 decoding

    IN: list of 3-tuples (a,b,x) being generated by the LZ77 encoder
    OUT: reconstructed input sequence
    '''
    return_value = []
    for s in input_tuples:
        m = len(return_value)
        for i in range(s[1]):
            return_value.append(return_value[m-s[0]+i])
        if s[2] == 'EOF':
            break
        return_value.append(s[2])
    return return_value
print('Example 1: abracadabra')
enc1 = LZ77_encode('abracadabra')
print(enc1)
print(''.join(LZ77_decode(enc1)))
print('---------------')
print('Example 2: abracadabrad')
enc2 = LZ77_encode('abracadabrad')
print(enc2)
print(''.join(LZ77_decode(enc2)))
print('---------------')
print('Example 3: HAHAHAHA!')
enc3 = LZ77_encode('HAHAHAHA!')
print(enc3)
print(''.join(LZ77_decode(enc3)))
print('---------------')
print('Example 4: nachrichtentechnikistelegant')
enc4 = LZ77_encode('nachrichtentechnikistelegant')
print(enc4)
print(''.join(LZ77_decode(enc4)))
print('---------------')
# Huffman function
def huffman_recursive(symb_dict, show_steps=0):
    '''
    Recursive implementation of Huffman coding
    partly according to: https://gist.github.com/mreid/fdf6353ec39d050e972b

    Note: For convenience the two most unlikely symbols are at the beginning/the first indices of the dict

    IN: symb_dict ( dictionary of { letter : probability } )
        show_steps ( boolean allowing output of intermediate codes/intermediate steps )
    OUT: code_dict (dict of shape { letter: codeword } )
    '''
    # check that probability equals 1.0 (approximately)
    np.testing.assert_almost_equal(sum(symb_dict.values()), 1.0)

    # if length equals 2 use 1 bit,
    # the shorter sequence obtaining leading 0
    if len(symb_dict) == 2:
        # compare sequence lengths and return coding ( shorter sequence coded by leading 0 )
        if len(list(symb_dict.keys())[1]) > len(list(symb_dict.keys())[0]):
            return dict(zip(symb_dict.keys(), ['1', '0']))
        else:
            return dict(zip(symb_dict.keys(), ['0', '1']))

    # copy dict
    symb_dict_new = symb_dict.copy()

    # sort dict w.r.t. increasing probability
    #
    # NOTE: lambda is an on-the-fly definition of a function of syntax "lambda with variables: do";
    # so lambda t: t[1] simply gets second element of t
    symb_dict_new_sorted = sorted(symb_dict_new.items(), key=lambda t: t[1])

    # if activated, show intermediate dicts for illustration
    if show_steps:
        dict_for_printing = [(key, round(val, 4)) for key, val in symb_dict_new_sorted]
        print(dict_for_printing)
        print()

    # find two least probable symbols
    # NOTE: - [ i ] gives a dict entry;
    #       - [ i ][ 0 ] gives the key of the dict entry, corresponding to the symbol
    s_N_1 = symb_dict_new_sorted[1][0]
    s_N = symb_dict_new_sorted[0][0]

    # pop according entries and create a new one with sum probability
    # key is concatenation of the old symbols
    p_N_1 = symb_dict_new.pop(s_N_1)
    p_N = symb_dict_new.pop(s_N)
    symb_dict_new[s_N + s_N_1] = p_N + p_N_1

    # apply recursion for the reduced symbol set
    code_dict = huffman_recursive(symb_dict_new, show_steps)

    # get codeword and append '1'/'0' for going up/down respectively
    cw = code_dict.pop(s_N + s_N_1)
    code_dict[s_N_1] = cw + '1'
    code_dict[s_N] = cw + '0'

    return code_dict
# two booleans for
# choosing example to be used and
# choosing whether or not showing intermediate results of Huffman
example = 4
show_intermediate_steps = False
if example == 1:
string = 'abracadabra'
elif example == 2:
string = 'abracadabrad'
elif example == 3:
string = 'HAHAHAHA!'
elif example == 4:
string = 'nachrichtentechnikistelegant'
encoded_tuples = LZ77_encode(string)
# transform encoded_tuples to dict (with probabilities) and normalize to have sum equal to 1
symb_dict = dict([(ele,count/len(encoded_tuples)) for ele, count in Counter(encoded_tuples).items()])
s = sum( symb_dict.values() )
symb_dict.update( (key, val / s ) for key, val in symb_dict.items() )
# apply Huffman function defined above
code = huffman_recursive( symb_dict , show_intermediate_steps )
# print various information
print('-------------------------')
print('Huffman coding: \n\n {}\n'.format( sorted( code.items(), key=lambda t: t[0] ) ) )
# determine average codeword length
L = 0
for l, p in symb_dict.items():
L += p * len( code[ l ] )
print('-------------------------')
print('Average codeword length: \tL = {:2.2f}'.format( L ) )
p_letters = list( symb_dict.values() )
print('Entropy: \t\t\tH(X) = {}'.format( - np.sum( p_letters * np.log2( p_letters ) ) ) )
print('Max. Entropie: \t\t\tH0 = {:2.2f}'.format( np.log2(len(p_letters))) )
# code text by simply parsing symbols and concatenating according codewords
# requires previous sections to be completed
coded_stream = []
for t in encoded_tuples:
coded_stream.append( code[ t ] )
# print various information
print('-------------------------')
print('Original text: {}\n'.format( string ) )
print('Number of letters: {}\n\n'.format( len( string ) ) )
print('Coded: {}\n'.format( ''.join( coded_stream ) ) )
print('Bits with Huffman: {}\n\n'.format( len( ''.join( coded_stream ) ) ) )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementation of Lempel-Ziv-77
Step2: Decoding algorithm: simply reconstruct the string by looking back into the already-decoded data (a worked micro-example of one triple follows this list)
Step3: Recursive Implementation of Huffman
Step4: Applying LZ77 followed by Huffman coding to different examples
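To make Step2 concrete, here is a minimal, self-contained sketch (not part of the original notebook; the prefix 'abr' and the triple (3, 1, 'c') are illustrative values consistent with encoding 'abracadabra') of how a single (a, b, x) triple is expanded during decoding: look back a symbols, copy b symbols, then append the literal x.
decoded = list('abr')                      # output reconstructed so far
a, b, x = 3, 1, 'c'                        # example triple (assumed for illustration)
start = len(decoded) - a                   # look back a symbols
decoded.extend(decoded[start:start + b])   # copy b symbols (here the 'a')
decoded.append(x)                          # append the literal ('c')
print(''.join(decoded))                    # -> 'abrac'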
|
4,715
|
<ASSISTANT_TASK:>
Python Code:
import nlp
from nlp import Page, HITS
from nlp import Lexicon, Rules, Grammar, ProbLexicon, ProbRules, ProbGrammar
from nlp import CYK_parse, Chart
from notebook import psource
psource(Lexicon, Rules, Grammar)
lexicon = Lexicon(
Verb = "is | say | are",
Noun = "robot | sheep | fence",
Adjective = "good | new | sad",
Adverb = "here | lightly | now",
Pronoun = "me | you | he",
RelPro = "that | who | which",
Name = "john | mary | peter",
Article = "the | a | an",
Preposition = "to | in | at",
Conjunction = "and | or | but",
Digit = "1 | 2 | 0"
)
print("Lexicon", lexicon)
rules = Rules(
S = "NP VP | S Conjunction S",
NP = "Pronoun | Name | Noun | Article Noun \
| Article Adjs Noun | Digit | NP PP | NP RelClause",
VP = "Verb | VP NP | VP Adjective | VP PP | VP Adverb",
Adjs = "Adjective | Adjective Adjs",
PP = "Preposition NP",
RelClause = "RelPro VP"
)
print("\nRules:", rules)
grammar = Grammar("A Simple Grammar", rules, lexicon)
print("How can we rewrite 'VP'?", grammar.rewrites_for('VP'))
print("Is 'the' an article?", grammar.isa('the', 'Article'))
print("Is 'here' a noun?", grammar.isa('here', 'Noun'))
E_Chomsky = Grammar("E_Prob_Chomsky", # A Grammar in Chomsky Normal Form
Rules(
S = "NP VP",
NP = "Article Noun | Adjective Noun",
VP = "Verb NP | Verb Adjective",
),
Lexicon(
Article = "the | a | an",
Noun = "robot | sheep | fence",
Adjective = "good | new | sad",
Verb = "is | say | are"
))
print(E_Chomsky.cnf_rules())
grammar.generate_random('S')
psource(ProbLexicon, ProbRules, ProbGrammar)
lexicon = ProbLexicon(
Verb = "is [0.5] | say [0.3] | are [0.2]",
Noun = "robot [0.4] | sheep [0.4] | fence [0.2]",
Adjective = "good [0.5] | new [0.2] | sad [0.3]",
Adverb = "here [0.6] | lightly [0.1] | now [0.3]",
Pronoun = "me [0.3] | you [0.4] | he [0.3]",
RelPro = "that [0.5] | who [0.3] | which [0.2]",
Name = "john [0.4] | mary [0.4] | peter [0.2]",
Article = "the [0.5] | a [0.25] | an [0.25]",
Preposition = "to [0.4] | in [0.3] | at [0.3]",
Conjunction = "and [0.5] | or [0.2] | but [0.3]",
Digit = "0 [0.35] | 1 [0.35] | 2 [0.3]"
)
print("Lexicon", lexicon)
rules = ProbRules(
S = "NP VP [0.6] | S Conjunction S [0.4]",
NP = "Pronoun [0.2] | Name [0.05] | Noun [0.2] | Article Noun [0.15] \
| Article Adjs Noun [0.1] | Digit [0.05] | NP PP [0.15] | NP RelClause [0.1]",
VP = "Verb [0.3] | VP NP [0.2] | VP Adjective [0.25] | VP PP [0.15] | VP Adverb [0.1]",
Adjs = "Adjective [0.5] | Adjective Adjs [0.5]",
PP = "Preposition NP [1]",
RelClause = "RelPro VP [1]"
)
print("\nRules:", rules)
grammar = ProbGrammar("A Simple Probabilistic Grammar", rules, lexicon)
print("How can we rewrite 'VP'?", grammar.rewrites_for('VP'))
print("Is 'the' an article?", grammar.isa('the', 'Article'))
print("Is 'here' a noun?", grammar.isa('here', 'Noun'))
E_Prob_Chomsky = ProbGrammar("E_Prob_Chomsky", # A Probabilistic Grammar in CNF
ProbRules(
S = "NP VP [1]",
NP = "Article Noun [0.6] | Adjective Noun [0.4]",
VP = "Verb NP [0.5] | Verb Adjective [0.5]",
),
ProbLexicon(
Article = "the [0.5] | a [0.25] | an [0.25]",
Noun = "robot [0.4] | sheep [0.4] | fence [0.2]",
Adjective = "good [0.5] | new [0.2] | sad [0.3]",
Verb = "is [0.5] | say [0.3] | are [0.2]"
))
print(E_Prob_Chomsky.cnf_rules())
sentence, prob = grammar.generate_random('S')
print(sentence)
print(prob)
psource(HITS)
testHTML = """Like most other male mammals, a man inherits an
            X from his mom and a Y from his dad."""
testHTML2 = "a mom and a dad"
pA = Page('A', ['B', 'C', 'E'], ['D'])
pB = Page('B', ['E'], ['A', 'C', 'D'])
pC = Page('C', ['B', 'E'], ['A', 'D'])
pD = Page('D', ['A', 'B', 'C', 'E'], [])
pE = Page('E', [], ['A', 'B', 'C', 'D', 'F'])
pF = Page('F', ['E'], [])
nlp.pageDict = {pA.address: pA, pB.address: pB, pC.address: pC,
pD.address: pD, pE.address: pE, pF.address: pF}
nlp.pagesIndex = nlp.pageDict
nlp.pagesContent ={pA.address: testHTML, pB.address: testHTML2,
pC.address: testHTML, pD.address: testHTML2,
pE.address: testHTML, pF.address: testHTML2}
HITS('mammals')
page_list = ['A', 'B', 'C', 'D', 'E', 'F']
auth_list = [pA.authority, pB.authority, pC.authority, pD.authority, pE.authority, pF.authority]
hub_list = [pA.hub, pB.hub, pC.hub, pD.hub, pE.hub, pF.hub]
for i in range(6):
p = page_list[i]
a = auth_list[i]
h = hub_list[i]
print("{}: total={}, auth={}, hub={}".format(p, a + h, a, h))
psource(CYK_parse)
E_Prob_Chomsky = ProbGrammar("E_Prob_Chomsky", # A Probabilistic Grammar in CNF
ProbRules(
S = "NP VP [1]",
NP = "Article Noun [0.6] | Adjective Noun [0.4]",
VP = "Verb NP [0.5] | Verb Adjective [0.5]",
),
ProbLexicon(
Article = "the [0.5] | a [0.25] | an [0.25]",
Noun = "robot [0.4] | sheep [0.4] | fence [0.2]",
Adjective = "good [0.5] | new [0.2] | sad [0.3]",
Verb = "is [0.5] | say [0.3] | are [0.2]"
))
words = ['the', 'robot', 'is', 'good']
grammar = E_Prob_Chomsky
P = CYK_parse(words, grammar)
print(P)
parses = {k: p for k, p in P.items() if p >0}
print(parses)
psource(Chart)
chart = Chart(nlp.E0)
print(chart.parses('the stench is in 2 2'))
chart_trace = Chart(nlp.E0, trace=True)
chart_trace.parses('the stench is in 2 2')
print(chart.parses('the stench 2 2'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CONTENTS
Step2: Let's build a lexicon and a grammar for the above language
Step3: Both functions return a dictionary whose keys are the left-hand sides of the rules. For the lexicon, the values are the terminals for each left-hand-side non-terminal, while for the rules the values are the right-hand sides as lists.
Step4: If the grammar is in Chomsky Normal Form, we can call the class function cnf_rules to get all the rules in the form of (X, Y, Z) for each X -> Y Z rule. Since the above grammar is not in CNF though, we have to create a new one.
Step5: Finally, we can generate random phrases using our grammar. Most of them will be complete gibberish, falling under the overgenerated phrases of the grammar. That goes to show that in the grammar the valid phrases are much fewer than the overgenerated ones.
Step6: Probabilistic
Step7: Let's build a lexicon and rules for the probabilistic grammar
Step8: Let's use the above to assemble our probabilistic grammar and run some simple queries
Step9: If we have a grammar in CNF, we can get a list of all the rules. Let's create a grammar in that form and print the CNF rules
Step10: Lastly, we can generate random sentences from this grammar. The function generate_random returns a tuple (sentence, probability).
Step11: As with the non-probabilistic grammars, this one mostly overgenerates. You can also see that the probability is very, very low, which means there are a ton of generateable sentences (in this case infinite, since we have recursion; notice how VP can produce another VP, for example).
Step13: First we compile the collection of pages as mentioned above. Then, we initialize the authority and hub scores for each page and finally we update and normalize the values until convergence.
Step14: We can now run the HITS algorithm. Our query will be 'mammals' (note that while the content of the HTML doesn't matter, it should include the query words or else no page will be picked at the first step).
Step15: Let's see how the pages were scored
Step16: The top score is 0.82 by "C". This is the most relevant page according to the algorithm. You can see that the pages it links to, "A" and "D", have the two highest authority scores (therefore "C" has a high hub score) and the pages it is linked from, "B" and "E", have the highest hub scores (so "C" has a high authority score). By combining these two facts, we get that "C" is the most relevant page. It is worth noting that it does not matter if the given page contains the query words, just that it links and is linked from high-quality pages.
Step17: When updating the probability of a substring, we pick the max of its current value and the probability of the substring broken into two parts (a short sketch of this update follows this list)
Step18: Now let's see the probabilities table for the sentence "the robot is good"
Step19: A defaultdict object is returned (defaultdict is basically a dictionary but with a default value/type). Keys are tuples in the form mentioned above and the values are the corresponding probabilities. Most of the items/parses have a probability of 0. Let's filter those out to take a better look at the parses that matter.
Step20: The item ('Article', 0, 1)
Step21: Example
Step22: And then we simply call the parses function
Step23: You can see which edges get added by setting the optional initialization argument trace to true.
Step24: Let's try and parse a sentence that is not recognized by the grammar
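To illustrate the Step17 update rule, here is a minimal, self-contained sketch. The toy lexicon, rule probabilities and span-index convention below are illustrative assumptions, not the library's internals:
from collections import defaultdict
words = ['the', 'robot']
lexicon = {('Article', 'the'): 0.5, ('Noun', 'robot'): 0.4}   # assumed toy probabilities
rules = [('NP', 'Article', 'Noun', 0.6)]                      # X -> Y Z [p]
P = defaultdict(float)
# Initialise single-word spans from the lexicon.
for i, w in enumerate(words):
    for (X, word), p in lexicon.items():
        if word == w:
            P[X, i, i + 1] = p
# Combine adjacent spans, keeping the best probability found for each (symbol, span) pair.
n = len(words)
for length in range(2, n + 1):
    for i in range(n - length + 1):
        j = i + length
        for k in range(i + 1, j):
            for X, Y, Z, p in rules:
                P[X, i, j] = max(P[X, i, j], p * P[Y, i, k] * P[Z, k, j])
print(P['NP', 0, 2])   # 0.5 * 0.4 * 0.6 = 0.12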
|
4,716
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
# Add your filename and uncomment the following line:
#Image(filename='drought.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graphical excellence and integrity
|
4,717
|
<ASSISTANT_TASK:>
Python Code:
dx = 1.
x = 1.
while(dx > 1.e-10):
dy = (x+dx)*(x+dx)-x*x
d = dy / dx
print("%6.0e %20.16f %20.16f" % (dx, d, d-2.))
dx = dx / 10.
((1.+0.0001)*(1+0.0001)-1)
dx = 1.
x = 1.
while(dx > 1.e-10):
dy = (x+dx)*(x+dx)-x*x
d = dy / dx
print("%8.5e %20.16f %20.16f" % (dx, d, d-2.))
dx = dx / 2.
from math import sin, sqrt, pi
dx = 1.
while(dx > 1.e-10):
x = pi/4.
d1 = sin(x+dx) - sin(x); #forward
d2 = sin(x+dx*0.5) - sin(x-dx*0.5); # midpoint
d1 = d1 / dx;
d2 = d2 / dx;
print("%8.5e %20.16f %20.16f %20.16f %20.16f" % (dx, d1, d1-sqrt(2.)/2., d2, d2-sqrt(2.)/2.) )
dx = dx / 2.
%matplotlib inline
import numpy as np
from matplotlib import pyplot
y = lambda x: x*x
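# Compare numpy.gradient (central differences in the interior, one-sided at the ends)
# with numpy.diff (forward differences) on grids of spacing 1 and 0.1.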
x1 = np.arange(0,10,1)
x2 = np.arange(0,10,0.1)
y1 = np.gradient(y(x1), 1.)
print (y1)
pyplot.plot(x1,np.gradient(y(x1),1.),'r--o');
pyplot.plot(x1[:x1.size-1],np.diff(y(x1))/np.diff(x1),'b--x');
pyplot.plot(x2,np.gradient(y(x2),0.1),'b--o');
from scipy.misc import derivative
y = lambda x: x**2
dx = 1.
x = 1.
while(dx > 1.e-10):
d = derivative(y, x, dx, n=1, order=3)
print("%6.0e %20.16f %20.16f" % (dx, d, d-2.))
dx = dx / 10.
from decimal import Decimal
dx = Decimal("1.")
while(dx >= Decimal("1.e-10")):
x = Decimal("1.")
dy = (x+dx)*(x+dx)-x*x
d = dy / dx
print("%6.0e %20.16f %20.16f" % (dx, d, d-Decimal("2.")))
dx = dx / Decimal("10.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Why is it that the sequence does not converge? This is due to the round-off errors in the representation of the floating point numbers. To see this, we can simply type
Step2: Let's try using powers of 1/2
Step3: In addition, one could consider the midpoint difference, defined as $f'(x) \approx [f(x+\Delta x/2) - f(x-\Delta x/2)] / \Delta x$ (see the error-order note after this list)
Step4: A more in-depth discussion about round-off errors in numerical differentiation can be found <a href="http
Step5: Notice above that gradient() uses forward and backward differences at the two ends.
Step6: More discussion about numerical differentiation, including higher order methods with error extrapolation can be found <a href="http
Step7: One way to improve the roundoff errors is by simply using the decimal package
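A short note on why the Step3 midpoint rule is more accurate (a standard Taylor-expansion argument, not from the original notebook): since $f(x+\Delta x) - f(x) = f'(x)\,\Delta x + \tfrac{1}{2} f''(x)\,\Delta x^2 + O(\Delta x^3)$, the forward-difference estimate of $f'(x)$ carries an error of order $\Delta x$, whereas $f(x+\Delta x/2) - f(x-\Delta x/2) = f'(x)\,\Delta x + \tfrac{1}{24} f'''(x)\,\Delta x^3 + O(\Delta x^5)$, so the midpoint estimate carries an error of order $\Delta x^2$, until round-off error takes over, as the loops above demonstrate.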
|
4,718
|
<ASSISTANT_TASK:>
Python Code:
import mbuild as mb
class MonoLJ(mb.Compound):
def __init__(self):
super(MonoLJ, self).__init__()
lj_particle1 = mb.Particle(name='LJ', pos=[0, 0, 0])
self.add(lj_particle1)
lj_particle2 = mb.Particle(name='LJ', pos=[1, 0, 0])
self.add(lj_particle2)
lj_particle3 = mb.Particle(name='LJ', pos=[0, 1, 0])
self.add(lj_particle3)
lj_particle4 = mb.Particle(name='LJ', pos=[0, 0, 1])
self.add(lj_particle4)
lj_particle5 = mb.Particle(name='LJ', pos=[1, 0, 1])
self.add(lj_particle5)
lj_particle6 = mb.Particle(name='LJ', pos=[1, 1, 0])
self.add(lj_particle6)
lj_particle7 = mb.Particle(name='LJ', pos=[0, 1, 1])
self.add(lj_particle7)
lj_particle8 = mb.Particle(name='LJ', pos=[1, 1, 1])
self.add(lj_particle8)
monoLJ = MonoLJ()
monoLJ.visualize()
import mbuild as mb
class MonoLJ(mb.Compound):
def __init__(self):
super(MonoLJ, self).__init__()
lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
for i in range(0,2):
for j in range(0,2):
for k in range(0,2):
lj_particle = mb.clone(lj_proto)
pos = [i,j,k]
mb.translate(lj_particle, pos)
self.add(lj_particle)
monoLJ = MonoLJ()
monoLJ.visualize()
import mbuild as mb
class MonoLJ(mb.Compound):
def __init__(self):
super(MonoLJ, self).__init__()
lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
pattern = mb.Grid3DPattern(2, 2, 2)
pattern.scale(2)
for pos in pattern:
lj_particle = mb.clone(lj_proto)
mb.translate(lj_particle, pos)
self.add(lj_particle)
monoLJ = MonoLJ()
monoLJ.visualize()
import mbuild as mb
class MonoLJ(mb.Compound):
def __init__(self):
super(MonoLJ, self).__init__()
lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
pattern = mb.Grid2DPattern(5, 5)
pattern.scale(5)
for pos in pattern:
lj_particle = mb.clone(lj_proto)
mb.translate(lj_particle, pos)
self.add(lj_particle)
monoLJ = MonoLJ()
monoLJ.visualize()
import mbuild as mb
class MonoLJ(mb.Compound):
def __init__(self):
super(MonoLJ, self).__init__()
lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
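        # Distribute 200 points over the surface of a sphere, then scale the pattern down.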
pattern_sphere = mb.SpherePattern(200)
pattern_sphere.scale(0.5)
for pos in pattern_sphere:
lj_particle = mb.clone(lj_proto)
pos[0]-=1.0
mb.translate(lj_particle, pos)
self.add(lj_particle)
pattern_disk = mb.DiskPattern(200)
pattern_disk.scale(0.5)
for pos in pattern_disk:
lj_particle = mb.clone(lj_proto)
pos[0]+=1.0
mb.translate(lj_particle, pos)
self.add(lj_particle)
monoLJ = MonoLJ()
monoLJ.visualize()
import mbuild as mb
class SphereLJ(mb.Compound):
def __init__(self):
super(SphereLJ, self).__init__()
lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
pattern_sphere = mb.SpherePattern(200)
pattern_sphere.scale(0.5)
for pos in pattern_sphere:
lj_particle = mb.clone(lj_proto)
mb.translate(lj_particle, pos)
self.add(lj_particle)
class DiskLJ(mb.Compound):
def __init__(self):
super(DiskLJ, self).__init__()
lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
pattern_disk = mb.DiskPattern(200)
pattern_disk.scale(0.5)
for pos in pattern_disk:
lj_particle = mb.clone(lj_proto)
mb.translate(lj_particle, pos)
self.add(lj_particle)
class MonoLJ(mb.Compound):
def __init__(self):
super(MonoLJ, self).__init__()
sphere = SphereLJ();
pos=[-1, 0, 0]
mb.translate(sphere, pos)
self.add(sphere)
disk = DiskLJ();
pos=[1, 0, 0]
mb.translate(disk, pos)
self.add(disk)
monoLJ = MonoLJ()
monoLJ.visualize()
import mbuild as mb
class SphereLJ(mb.Compound):
def __init__(self):
super(SphereLJ, self).__init__()
lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
pattern_sphere = mb.SpherePattern(13)
pattern_sphere.scale(0.5)
for pos in pattern_sphere:
lj_particle = mb.clone(lj_proto)
mb.translate(lj_particle, pos)
self.add(lj_particle)
class MonoLJ(mb.Compound):
def __init__(self):
super(MonoLJ, self).__init__()
sphere = SphereLJ();
pattern = mb.Grid3DPattern(3, 3, 3)
pattern.scale(10)
for pos in pattern:
lj_sphere = mb.clone(sphere)
mb.translate_to(lj_sphere, pos)
#shift the particle so the center of mass
#of the system is at the origin
mb.translate(lj_sphere, [-5,-5,-5])
self.add(lj_sphere)
monoLJ = MonoLJ()
monoLJ.visualize()
import mbuild as mb
import random
from numpy import pi
class CubeLJ(mb.Compound):
def __init__(self):
super(CubeLJ, self).__init__()
lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
pattern = mb.Grid3DPattern(2, 2, 2)
pattern.scale(1)
for pos in pattern:
lj_particle = mb.clone(lj_proto)
mb.translate(lj_particle, pos)
self.add(lj_particle)
class MonoLJ(mb.Compound):
def __init__(self):
super(MonoLJ, self).__init__()
cube_proto = CubeLJ();
pattern = mb.Grid3DPattern(3, 3, 3)
pattern.scale(10)
rnd = random.Random()
rnd.seed(123)
for pos in pattern:
lj_cube = mb.clone(cube_proto)
mb.translate_to(lj_cube, pos)
#shift the particle so the center of mass
#of the system is at the origin
mb.translate(lj_cube, [-5,-5,-5])
mb.spin(lj_cube, rnd.uniform(0, 2 * pi), [1, 0, 0])
mb.spin(lj_cube, rnd.uniform(0, 2 * pi), [0, 1, 0])
mb.spin(lj_cube, rnd.uniform(0, 2 * pi), [0, 0, 1])
self.add(lj_cube)
monoLJ = MonoLJ()
monoLJ.visualize()
#save as xyz file
monoLJ.save('output.xyz')
#save as mol2
monoLJ.save('output.mol2')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: While this would work for defining a single molecule or very small system, this would not be efficient for large systems. Instead, the clone and translate operator can be used to facilitate automation. Below, we simply define a single prototype particle (lj_proto), which we then copy and translate about the system.
Step2: To simplify this process, mBuild provides several build-in patterning tools, where for example, Grid3DPattern can be used to perform this same operation. Grid3DPattern generates a set of points, from 0 to 1, which get stored in the variable "pattern". We need only loop over the points in pattern, cloning, translating, and adding to the system. Note, because Grid3DPattern defines points between 0 and 1, they must be scaled based on the desired system size, i.e., pattern.scale(2).
Step3: Larger systems can therefore be easily generated by toggling the values given to Grid3DPattern. Other patterns can also be generated using the same basic code, such as a 2D grid pattern
Step4: Points on a sphere can be generated using SpherePattern. Points on a disk using DisKPattern, etc.
Step5: We can also take advantage of the hierachical nature of mBuild to accomplish the same task more cleanly. Below we create a component that corresponds to the sphere (class SphereLJ), and one that corresponds to the disk (class DiskLJ), and then instantiate and shift each of these individually in the MonoLJ component.
Step6: Again, since mBuild is hierarchical, the pattern functions can be used to generate large systems of any arbitrary component. For example, we can replicate the SphereLJ component on a regular array.
Step7: Several functions exist for rotating compounds. For example, the spin command allows a compound to be rotated, in place, about a specific axis (i.e., it considers the origin for the rotation to lie at the compound's center of mass).
Step8: Configurations can be dumped to file using the save command; this takes advantage of MDTraj and supports a range of file formats (see http
|
4,719
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import json
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer
# Download the data
! curl https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data --output adult.data
! curl https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test --output adult.test
# Define the format of your input data including unused columns (These are the columns from the census data files)
COLUMNS = (
'age',
'workclass',
'fnlwgt',
'education',
'education-num',
'marital-status',
'occupation',
'relationship',
'race',
'sex',
'capital-gain',
'capital-loss',
'hours-per-week',
'native-country',
'income-level'
)
# Categorical columns are columns that need to be turned into a numerical value to be used by scikit-learn
CATEGORICAL_COLUMNS = (
'workclass',
'education',
'marital-status',
'occupation',
'relationship',
'race',
'sex',
'native-country'
)
with open('./adult.data', 'r') as train_data:
raw_training_data = pd.read_csv(train_data, header=None, names=COLUMNS)
# Remove the column you are trying to predict ('income-level') from our features list
# Convert the Dataframe to a lists of lists
train_features = raw_training_data.drop('income-level', axis=1).values.tolist()
# Create our training labels list, convert the Dataframe to a lists of lists
train_labels = (raw_training_data['income-level'] == ' >50K').values.tolist()
with open('./adult.test', 'r') as test_data:
raw_testing_data = pd.read_csv(test_data, names=COLUMNS, skiprows=1)
# Remove the column we are trying to predict ('income-level') from our features list
# Convert the Dataframe to a lists of lists
test_features = raw_testing_data.drop('income-level', axis=1).values.tolist()
# Create our training labels list, convert the Dataframe to a lists of lists
test_labels = (raw_testing_data['income-level'] == ' >50K.').values.tolist()
categorical_pipelines = []
for i, col in enumerate(COLUMNS[:-1]):
if col in CATEGORICAL_COLUMNS:
# Create a scores array to get the individual categorical column.
# Example:
# data = [39, 'State-gov', 77516, 'Bachelors', 13, 'Never-married', 'Adm-clerical',
# 'Not-in-family', 'White', 'Male', 2174, 0, 40, 'United-States']
# scores = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
#
# Returns: [['State-gov']]
# Build the scores array
scores = [0] * len(COLUMNS[:-1])
# This column is the categorical column you want to extract.
scores[i] = 1
skb = SelectKBest(k=1)
skb.scores_ = scores
# Convert the categorical column to a numerical value
lbn = LabelBinarizer()
r = skb.transform(train_features)
lbn.fit(r)
# Create the pipeline to extract the categorical feature
categorical_pipelines.append(
('categorical-{}'.format(i), Pipeline([
('SKB-{}'.format(i), skb),
('LBN-{}'.format(i), lbn)])))
# Create pipeline to extract the numerical features
skb = SelectKBest(k=6)
# From COLUMNS use the features that are numerical
skb.scores_ = [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0]
categorical_pipelines.append(('numerical', skb))
# Combine all the features using FeatureUnion
preprocess = FeatureUnion(categorical_pipelines)
# Create the classifier
classifier = RandomForestClassifier()
# Transform the features and fit them to the classifier
classifier.fit(preprocess.transform(train_features), train_labels)
# Create the overall model as a single pipeline
pipeline = Pipeline([
('union', preprocess),
('classifier', classifier)
])
model = 'model.joblib'
joblib.dump(pipeline, model)
!ls -al model.joblib
print('Show a person that makes <=50K:')
print('\tFeatures: {0} --> Label: {1}\n'.format(test_features[0], test_labels[0]))
with open('less_than_50K.json', 'w') as outfile:
json.dump(test_features[0], outfile)
print('Show a person that makes >50K:')
print('\tFeatures: {0} --> Label: {1}'.format(test_features[3], test_labels[3]))
with open('more_than_50K.json', 'w') as outfile:
json.dump(test_features[3], outfile)
local_results = pipeline.predict(test_features)
local = pd.Series(local_results, name='local')
local[:10]
# Print the first 10 responses
for i, response in enumerate(local[:10]):
print('Prediction: {}\tLabel: {}'.format(response, test_labels[i]))
actual = pd.Series(test_labels, name='actual')
local_predictions = pd.Series(local_results, name='local')
pd.crosstab(actual, local_predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add code to download the data (in this case, using the publicly hosted data).
Step2: Read in the data
Step3: Load the training census dataset
Step4: Load the test census dataset
Step5: This is where your model code would go. Below is an example model using the census dataset.
Step6: Export the model to a file
Step7: Predictions
Step8: Use Python to make local predictions
Step9: [Optional] Verify Results
|
4,720
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q opencv-python
import os
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
import numpy as np
import cv2
from IPython import display
import math
# Load the model once from TF-Hub.
hub_handle = 'https://tfhub.dev/deepmind/mil-nce/s3d/1'
hub_model = hub.load(hub_handle)
def generate_embeddings(model, input_frames, input_words):
  """Generate embeddings from the model from video frames and input words."""
# Input_frames must be normalized in [0, 1] and of the shape Batch x T x H x W x 3
vision_output = model.signatures['video'](tf.constant(tf.cast(input_frames, dtype=tf.float32)))
text_output = model.signatures['text'](tf.constant(input_words))
return vision_output['video_embedding'], text_output['text_embedding']
# @title Define video loading and visualization functions { display-mode: "form" }
# Utilities to open video files using CV2
def crop_center_square(frame):
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y:start_y+min_dim,start_x:start_x+min_dim]
def load_video(video_url, max_frames=32, resize=(224, 224)):
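  # Download the clip, center-crop each frame to a square, resize it, convert BGR -> RGB,
  # repeat frames if the clip is shorter than max_frames, and normalize pixel values to [0, 1].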
path = tf.keras.utils.get_file(os.path.basename(video_url)[-128:], video_url)
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read()
if not ret:
break
frame = crop_center_square(frame)
frame = cv2.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]]
frames.append(frame)
if len(frames) == max_frames:
break
finally:
cap.release()
frames = np.array(frames)
if len(frames) < max_frames:
n_repeat = int(math.ceil(max_frames / float(len(frames))))
frames = frames.repeat(n_repeat, axis=0)
frames = frames[:max_frames]
return frames / 255.0
def display_video(urls):
html = '<table>'
html += '<tr><th>Video 1</th><th>Video 2</th><th>Video 3</th></tr><tr>'
for url in urls:
html += '<td>'
html += '<img src="{}" height="224">'.format(url)
html += '</td>'
html += '</tr></table>'
return display.HTML(html)
def display_query_and_results_video(query, urls, scores):
  """Display a text query and the top result videos and scores."""
sorted_ix = np.argsort(-scores)
html = ''
html += '<h2>Input query: <i>{}</i> </h2><div>'.format(query)
html += 'Results: <div>'
html += '<table>'
html += '<tr><th>Rank #1, Score:{:.2f}</th>'.format(scores[sorted_ix[0]])
html += '<th>Rank #2, Score:{:.2f}</th>'.format(scores[sorted_ix[1]])
html += '<th>Rank #3, Score:{:.2f}</th></tr><tr>'.format(scores[sorted_ix[2]])
for i, idx in enumerate(sorted_ix):
url = urls[sorted_ix[i]];
html += '<td>'
html += '<img src="{}" height="224">'.format(url)
html += '</td>'
html += '</tr></table>'
return html
# @title Load example videos and define text queries { display-mode: "form" }
video_1_url = 'https://upload.wikimedia.org/wikipedia/commons/b/b0/YosriAirTerjun.gif' # @param {type:"string"}
video_2_url = 'https://upload.wikimedia.org/wikipedia/commons/e/e6/Guitar_solo_gif.gif' # @param {type:"string"}
video_3_url = 'https://upload.wikimedia.org/wikipedia/commons/3/30/2009-08-16-autodrift-by-RalfR-gif-by-wau.gif' # @param {type:"string"}
video_1 = load_video(video_1_url)
video_2 = load_video(video_2_url)
video_3 = load_video(video_3_url)
all_videos = [video_1, video_2, video_3]
query_1_video = 'waterfall' # @param {type:"string"}
query_2_video = 'playing guitar' # @param {type:"string"}
query_3_video = 'car drifting' # @param {type:"string"}
all_queries_video = [query_1_video, query_2_video, query_3_video]
all_videos_urls = [video_1_url, video_2_url, video_3_url]
display_video(all_videos_urls)
# Prepare video inputs.
videos_np = np.stack(all_videos, axis=0)
# Prepare text input.
words_np = np.array(all_queries_video)
# Generate the video and text embeddings.
video_embd, text_embd = generate_embeddings(hub_model, videos_np, words_np)
# Scores between video and text is computed by dot products.
all_scores = np.dot(text_embd, tf.transpose(video_embd))
# Display results.
html = ''
for i, words in enumerate(words_np):
html += display_query_and_results_video(words, all_videos_urls, all_scores[i, :])
html += '<br>'
display.HTML(html)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Importing the TF-Hub model
Step4: Demonstrating text-to-video retrieval
|
4,721
|
<ASSISTANT_TASK:>
Python Code:
x = [51, 65, 56, 19, 11, 49, 81, 59, 45, 73]
max_val = 0
for element in x:
# ... now what?
pass
x = 5
if x < 5:
print("How did this happen?!") # Spoiler alert: this won't happen.
if x == 5:
print("Working as intended.")
x = 5
if x < 5:
print("How did this happen?!") # Spoiler alert: this won't happen.
else:
print("Correct.")
x = [51, 65, 56, 19, 11, 49, 81, 59, 45, 73]
max_val = 0
for element in x:
if max_val < element:
max_val = element
print("The maximum element is: {}".format(max_val))
student_grades = {
'Jen': 82,
'Shannon': 75,
'Natasha': 94,
'Benjamin': 48,
}
letter = ''
for student, grade in student_grades.items():
if grade >= 90:
letter = "A"
elif grade >= 80:
letter = "B"
elif grade >= 70:
letter = "C"
elif grade >= 60:
letter = "D"
else:
letter = "F"
print(student, letter)
x = [51, 65, 56, 19, 11, 49, 81, 59, 45, 73]
max_val = 81 # We've already found it!
second_largest = 0
True and True and True and True and True and True and False
True or True or True or True or True or True or False
False or False or False or False or False or False or True
(True and False) or (True or False)
for element in x:
if second_largest < element and element < max_val:
second_largest = element
print("The second-largest element is: {}".format(second_largest))
second_largest = 0
for element in x:
if second_largest < element:
if element < max_val:
second_largest = element
print("The second-largest element is: {}".format(second_largest))
numbers = [1, 2, 5, 6, 7, 9, 10]
for num in numbers:
if num == 2 or num == 4 or num == 6 or num == 8 or num == 10:
print("{} is an even number.".format(num))
import random
list_of_numbers = [i for i in range(10)] # The numbers 0 through 9.
if 13 not in list_of_numbers:
print("Aw man, my lucky number isn't here!")
import random
list_of_numbers = [i for i in range(10)] # The numbers 0 through 9.
if 13 in list_of_numbers:
print("Somehow the number 13 is in a list generated by range(10)")
def divide(x, y):
return x / y
divide(11, 0)
def divide_safe(x, y):
quotient = 0
try:
quotient = x / y
except ZeroDivisionError:
print("You tried to divide by zero. Why would you do that?!")
return quotient
divide_safe(11, 0)
import random # For generating random exceptions.
num = random.randint(0, 1)
try:
    # code for something that can cause multiple exceptions
pass
except NameError:
print("Caught a NameError!")
except ValueError:
print("Nope, it was actually a ValueError.")
import random # For generating random exceptions.
num = random.randint(0, 1)
try:
if num == 1:
raise NameError("This happens when you use a variable you haven't defined")
else:
raise ValueError("This happens when you try to multiply a string")
except (NameError, ValueError): # MUST have the parentheses!
print("Caught...well, some kinda error, not sure which.")
import random # For generating random exceptions.
num = random.randint(0, 1)
try:
if num == 1:
raise NameError("This happens when you use a variable you haven't defined")
else:
raise ValueError("This happens when you try to multiply a string")
except:
print("I caught something!")
import random # For generating random exceptions.
num = random.randint(0, 1)
try:
if num == 1:
raise NameError("This happens when you use a variable you haven't defined")
except:
print("I caught something!")
else:
print("HOORAY! Lucky coin flip!")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If we want to figure out the maximum value, we'll obviously need a loop to check each element of the list (which we know how to do), and a variable to store the maximum.
Step2: We also know we can check relative values, like max_val < element. If this evaluates to True, we know we've found a number in the list that's bigger than our current candidate for maximum value. But how do we execute code until this condition, and this condition alone?
Step3: In conjunction with if, we also have an else clause that we can use to execute whenever the if statement doesn't
Step4: This is great! We can finally finish computing the maximum element of a list!
Step5: Let's pause here and walk through that code.
Step6: We know the 90-100 range is an "A", 80-89 is a "B", and so on. How would we build a conditional to assign letter grades?
Step7: Ok, that's neat. But there's still one more edge case
Step8: Here's the rub
Step9: One False ruins the whole thing.
Step10: Figured it out?
Step11: (Order of operations works the same way!)
Step12: Let's step through the code.
Step13: ...but your code starts getting a little unwieldy with so many indentations.
Step14: In this contrived example, I've glued together a bunch of constraints. Obviously, these constraints are mutually exclusive; a number can't be equal to both 2 and 4 at the same time, so num == 2 and num == 4 would never evaluate to True. However, using or, only one of them needs to be True for the statement underneath to execute.
Step15: Notice a couple things here--
Step16: Nothing is printed in this case, since our conditional is asking if the number 13 was in the list. Which it's not.
Step17: D'oh! The user fed us a 0 for the denominator and broke our calculator. Meanie-face.
Step18: Now if our user tries to be snarky again--
Step19: No error, no crash! Just a "helpful" error message.
Step20: Also like conditionals, you can handle multiple errors simultaneously. If, like in the previous example, your code can raise multiple exceptions, but you want to handle them all the same way, you can stack them all in one except statement
Step21: If you're like me, and you're writing code that you know could raise one of several errors, but are too lazy to look up specifically what errors are possible, you can create a "catch-all" by just not specifying anything
Step22: Finally--and this is really getting into what's known as control flow (quite literally
|
4,722
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
from numpy import pi, sin, cos, linspace, exp, real, imag, abs, conj, meshgrid, log, log10, angle
from numpy.fft import fft, fftshift, ifft
from mpl_toolkits.mplot3d import axes3d
import BeamOptics as bopt
%matplotlib inline
b=.08*1e-3 # the slit width
a=.5*1e-3 # the slit spacing
k=2*pi/(795*1e-9) # longitudinal wavenumber
wt=0 # let time be zero
C=1 # unit amplitude
L=1.8 # distance from slits to CCD
d=.016 # distance from signal to LO at upstream end (used to calculate k_perp)
ccdwidth = 1300 # number of pixels
pixwidth = 20e-6 # pixel width (in meters)
y = linspace(-pixwidth*ccdwidth/2,pixwidth*ccdwidth/2,ccdwidth)
def alpha(y,a):
return k*a*y/(2*L)
def beta(y,b):
return k*b*y/(2*L)
def E_ds(y,a,b):
    """Double-slit field"""
# From Hecht p 458:
#return b*C*(sin(beta(y)) / beta(y)) * (sin(wt-k*L) + sin(wt-k*L+2*alpha(y)))
# drop the time-dep term as it will average away:
return 2*b*C*(sin(beta(y,b)) / beta(y,b)) * cos(alpha(y,a)) #* sin(wt - k*L + alpha(y))
def E_dg(y,a,b):
    """Double gaussian field"""
# The width needs to be small enough to see interference
# otherwise the beam doesn't diffract and shows no interference.
# We're using b for the gaussian width (i.e. equal to the slit width)
w=b
#return C*exp(1j*k*0.1*d*y/L)
return 5e-3*(bopt.gaussian_beam(0,y-a/2,L,E0=1,wavelambda=795e-9,w0=w,k=[0,0,k]) +
bopt.gaussian_beam(0,y+a/2,L,E0=1,wavelambda=795e-9,w0=w,k=[0,0,k]))
def E_lo(y,d):
    """Plane-wave LO beam incident at small angle, transverse wavenumber k*d*y/L"""
return C*exp(-1j*k*d*y/L)
plt.plot(y,abs(E_ds(y,a,b)))
plt.title("Double slit field")
plt.plot(y,abs(E_dg(y,a,b)))
plt.title("Double-Gaussian field")
def plotFFT(d,a,b):
    """Single function version of generating the FFT output"""
TotalField = E_dg(y,a,b)+E_lo(y,d)
TotalIntensity=TotalField*TotalField.conj()
plt.plot(abs(fft(TotalIntensity)),".-")
plt.ylim([0,1e-2])
plt.xlim([0,650])
plt.title("FFT output")
plotFFT(d=0.046,a=0.5e-3,b=0.08e-3)
plotFFT(d=0.035,a=0.5e-3,b=0.08e-3)
plotFFT(d=0.02,a=0.5e-3,b=0.08e-3)
def plotFFTds(d,a,b):
    """Single function version of generating the FFT output"""
TotalField = E_ds(y,a,b)+E_lo(y,d)
TotalIntensity=TotalField*TotalField.conj()
plt.plot(abs(fft(TotalIntensity)),".-")
plt.ylim([0,0.1])
plt.xlim([400,500])
plt.title("FFT output")
plotFFTds(d=0.025,a=0.5e-3,b=0.08e-3)
bopt.gaussian_beam?
# bopt.gaussian_beam(x, y, z, E0, wavelambda, w0, k)
# set to evaluate gaussian at L (full distance to CCD) with waist width of 2 cm
# using d=0.046 for agreement with experiment
d=0.046
E_lo_gauss = bopt.gaussian_beam(0,y,L,E0=1,wavelambda=795e-9,w0=0.02,k=[0,k*d/L,k])
plt.plot(y,abs(E_lo_gauss))
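# Interfere the double-Gaussian signal with the Gaussian LO beam and form the
# detected intensity |E_sig + E_LO|^2 across the CCD.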
TotalIntensity=(E_dg(y,a,b)+E_lo_gauss) * (E_dg(y,a,b)+E_lo_gauss).conj()
plt.figure(figsize=(14,4))
plt.plot(y,TotalIntensity,".-")
#plt.xlim([-.002,0])
plt.plot(abs(fft(TotalIntensity)),".-")
plt.ylim([0,0.01]) # Had to lower the LO power quite a bit, and then zoom way in.
plt.xlim([0,650])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Double-slit model
Step4: Sanity check
Step6: Define a single function to explore the FFT
Step7: This agrees well with Matt's code using symbolic calculations. The main difference I see is in the size of the low-frequency peak. It's much smaller here than in his version.
Step8: d=0.02
Step10: Double slit is still very different
Step11: This does not agree with experimental results.
|
4,723
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# Create epochs, here for gradiometers + EOG only for simplicity
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.manifold import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
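    # Keep only the 0-350 ms window, L2-normalize each epoch, then order the epochs along a
    # one-dimensional spectral embedding so that similar trials end up next to each other.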
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Show event-related fields images
|
4,724
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import gym
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
env = gym.make('FrozenLake-v0')
tf.reset_default_graph()
#These lines establish the feed-forward part of the network used to choose actions
inputs1 = tf.placeholder(shape=[1,16],dtype=tf.float32)
W = tf.Variable(tf.random_uniform([16,4],0,0.01))
Qout = tf.matmul(inputs1,W)
predict = tf.argmax(Qout,1)
#Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
nextQ = tf.placeholder(shape=[1,4],dtype=tf.float32)
loss = tf.reduce_sum(tf.square(nextQ - Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
init = tf.global_variables_initializer()
# Set learning parameters
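# y is the discount factor (gamma); e is the epsilon for the epsilon-greedy action choice.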
y = .99
e = 0.1
num_episodes = 2000
#create lists to contain total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(num_episodes):
#Reset environment and get first new observation
s = env.reset()
rAll = 0
d = False
j = 0
#The Q-Network
while j < 99:
j+=1
#Choose an action by greedily (with e chance of random action) from the Q-network
a,allQ = sess.run([predict,Qout],feed_dict={inputs1:np.identity(16)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
#Get new state and reward from environment
s1,r,d,_ = env.step(a[0])
#Obtain the Q' values by feeding the new state through our network
Q1 = sess.run(Qout,feed_dict={inputs1:np.identity(16)[s1:s1+1]})
#Obtain maxQ' and set our target value for chosen action.
maxQ1 = np.max(Q1)
targetQ = allQ
targetQ[0,a[0]] = r + y*maxQ1
#Train our network using target and predicted Q values
_,W1 = sess.run([updateModel,W],feed_dict={inputs1:np.identity(16)[s:s+1],nextQ:targetQ})
rAll += r
s = s1
if d == True:
#Reduce chance of random action as we train the model.
e = 1./((i/50) + 10)
break
jList.append(j)
rList.append(rAll)
print("Percent of succesful episodes: " + str(sum(rList)/num_episodes) + "%")
plt.plot(rList)
plt.plot(jList)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the environment
Step2: The Q-Network Approach
Step3: Training the network
Step4: Some statistics on network performance
Step5: It also begins to progress through the environment for longer than chance aroudn the 750 mark as well.
|
4,725
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import somato
from mne.baseline import rescale
from mne.stats import bootstrap_confidence_interval
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# let's explore some frequency bands
iter_freqs = [
('Theta', 4, 7),
('Alpha', 8, 12),
('Beta', 13, 25),
('Gamma', 30, 45)
]
# set epoching parameters
event_id, tmin, tmax = 1, -1., 3.
baseline = None
# get the header to extract events
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
frequency_map = list()
for band, fmin, fmax in iter_freqs:
# (re)load the data to save memory
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg='grad', eog=True) # we just look at gradiometers
raw.load_data()
# bandpass filter
raw.filter(fmin, fmax, n_jobs=1, # use more jobs to speed up.
l_trans_bandwidth=1, # make sure filter params are the same
h_trans_bandwidth=1) # in each band and skip "auto" option.
# epoch
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=baseline,
reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
# remove evoked response
epochs.subtract_evoked()
# get analytic signal (envelope)
epochs.apply_hilbert(envelope=True)
frequency_map.append(((band, fmin, fmax), epochs.average()))
del epochs
del raw
# Helper function for plotting spread
def stat_fun(x):
    """Return sum of squares."""
return np.sum(x ** 2, axis=0)
# Plot
fig, axes = plt.subplots(4, 1, figsize=(10, 7), sharex=True, sharey=True)
colors = plt.get_cmap('winter_r')(np.linspace(0, 1, 4))
for ((freq_name, fmin, fmax), average), color, ax in zip(
frequency_map, colors, axes.ravel()[::-1]):
times = average.times * 1e3
gfp = np.sum(average.data ** 2, axis=0)
gfp = mne.baseline.rescale(gfp, times, baseline=(None, 0))
ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5)
ax.axhline(0, linestyle='--', color='grey', linewidth=2)
ci_low, ci_up = bootstrap_confidence_interval(average.data, random_state=0,
stat_fun=stat_fun)
ci_low = rescale(ci_low, average.times, baseline=(None, 0))
ci_up = rescale(ci_up, average.times, baseline=(None, 0))
ax.fill_between(times, gfp + ci_up, gfp - ci_low, color=color, alpha=0.3)
ax.grid(True)
ax.set_ylabel('GFP')
ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax),
xy=(0.95, 0.8),
horizontalalignment='right',
xycoords='axes fraction')
ax.set_xlim(-1000, 3000)
axes.ravel()[-1].set_xlabel('Time [ms]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: We create average power time courses for each frequency band
Step4: Now we can compute the Global Field Power
|
4,726
|
<ASSISTANT_TASK:>
Python Code::
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error, mean_absolute_error
# initliase & fit model
model = SVR(C=1.5, kernel='linear')
model.fit(X_train, y_train)
# make prediction for test data
y_pred = model.predict(X_test)
# evaluate performance
print('RMSE:',mean_squared_error(y_test, y_pred, squared = False))
print('MAE:',mean_absolute_error(y_test, y_pred))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
4,727
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tensorflow==2.4.1
!pip install tensorflow-quantum
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
qubit = cirq.GridQubit(0, 0)
my_circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol('alpha'))
SVGCircuit(my_circuit)
pauli_x = cirq.X(qubit)
pauli_x
def my_expectation(op, alpha):
    """Compute ⟨Y(alpha)| `op` | Y(alpha)⟩"""
params = {'alpha': alpha}
sim = cirq.Simulator()
final_state_vector = sim.simulate(my_circuit, params).final_state_vector
return op.expectation_from_state_vector(final_state_vector, {qubit: 0}).real
my_alpha = 0.3
print("Expectation=", my_expectation(pauli_x, my_alpha))
print("Sin Formula=", np.sin(np.pi * my_alpha))
def my_grad(obs, alpha, eps=0.01):
grad = 0
f_x = my_expectation(obs, alpha)
f_x_prime = my_expectation(obs, alpha + eps)
return ((f_x_prime - f_x) / eps).real
print('Finite difference:', my_grad(pauli_x, my_alpha))
print('Cosine formula: ', np.pi * np.cos(np.pi * my_alpha))
expectation_calculation = tfq.layers.Expectation(
differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))
expectation_calculation(my_circuit,
operators=pauli_x,
symbol_names=['alpha'],
symbol_values=[[my_alpha]])
sampled_expectation_calculation = tfq.layers.SampledExpectation(
differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))
sampled_expectation_calculation(my_circuit,
operators=pauli_x,
repetitions=500,
symbol_names=['alpha'],
symbol_values=[[my_alpha]])
# Make input_points = [batch_size, 1] array.
input_points = np.linspace(0, 5, 200)[:, np.newaxis].astype(np.float32)
exact_outputs = expectation_calculation(my_circuit,
operators=pauli_x,
symbol_names=['alpha'],
symbol_values=input_points)
imperfect_outputs = sampled_expectation_calculation(my_circuit,
operators=pauli_x,
repetitions=500,
symbol_names=['alpha'],
symbol_values=input_points)
plt.title('Forward Pass Values')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.plot(input_points, exact_outputs, label='Analytic')
plt.plot(input_points, imperfect_outputs, label='Sampled')
plt.legend()
# Gradients are a much different story.
values_tensor = tf.convert_to_tensor(input_points)
with tf.GradientTape() as g:
g.watch(values_tensor)
exact_outputs = expectation_calculation(my_circuit,
operators=pauli_x,
symbol_names=['alpha'],
symbol_values=values_tensor)
analytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)
with tf.GradientTape() as g:
g.watch(values_tensor)
imperfect_outputs = sampled_expectation_calculation(
my_circuit,
operators=pauli_x,
repetitions=500,
symbol_names=['alpha'],
symbol_values=values_tensor)
sampled_finite_diff_gradients = g.gradient(imperfect_outputs, values_tensor)
plt.title('Gradient Values')
plt.xlabel('$x$')
plt.ylabel('$f^{\'}(x)$')
plt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')
plt.plot(input_points, sampled_finite_diff_gradients, label='Sampled')
plt.legend()
# A smarter differentiation scheme.
gradient_safe_sampled_expectation = tfq.layers.SampledExpectation(
differentiator=tfq.differentiators.ParameterShift())
with tf.GradientTape() as g:
g.watch(values_tensor)
imperfect_outputs = gradient_safe_sampled_expectation(
my_circuit,
operators=pauli_x,
repetitions=500,
symbol_names=['alpha'],
symbol_values=values_tensor)
sampled_param_shift_gradients = g.gradient(imperfect_outputs, values_tensor)
plt.title('Gradient Values')
plt.xlabel('$x$')
plt.ylabel('$f^{\'}(x)$')
plt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')
plt.plot(input_points, sampled_param_shift_gradients, label='Sampled')
plt.legend()
pauli_z = cirq.Z(qubit)
pauli_z
test_value = 0.
print('Finite difference:', my_grad(pauli_z, test_value))
print('Sin formula: ', -np.pi * np.sin(np.pi * test_value))
sum_of_outputs = tfq.layers.Expectation(
differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))
sum_of_outputs(my_circuit,
operators=[pauli_x, pauli_z],
symbol_names=['alpha'],
symbol_values=[[test_value]])
test_value_tensor = tf.convert_to_tensor([[test_value]])
with tf.GradientTape() as g:
g.watch(test_value_tensor)
outputs = sum_of_outputs(my_circuit,
operators=[pauli_x, pauli_z],
symbol_names=['alpha'],
symbol_values=test_value_tensor)
sum_of_gradients = g.gradient(outputs, test_value_tensor)
print(my_grad(pauli_x, test_value) + my_grad(pauli_z, test_value))
print(sum_of_gradients.numpy())
class MyDifferentiator(tfq.differentiators.Differentiator):
    """A Toy differentiator for <Y^alpha | X |Y^alpha>."""
def __init__(self):
pass
def get_gradient_circuits(self, programs, symbol_names, symbol_values):
        """Return circuits to compute gradients for given forward pass circuits.

        Every gradient on a quantum computer can be computed via measurements
        of transformed quantum circuits. Here, you implement a custom gradient
        for a specific circuit. For a real differentiator, you will need to
        implement this function in a more general way. See the differentiator
        implementations in the TFQ library for examples.
        """
# The two terms in the derivative are the same circuit...
batch_programs = tf.stack([programs, programs], axis=1)
# ... with shifted parameter values.
shift = tf.constant(1/2)
forward = symbol_values + shift
backward = symbol_values - shift
batch_symbol_values = tf.stack([forward, backward], axis=1)
# Weights are the coefficients of the terms in the derivative.
num_program_copies = tf.shape(batch_programs)[0]
batch_weights = tf.tile(tf.constant([[[np.pi/2, -np.pi/2]]]),
[num_program_copies, 1, 1])
# The index map simply says which weights go with which circuits.
batch_mapper = tf.tile(
tf.constant([[[0, 1]]]), [num_program_copies, 1, 1])
return (batch_programs, symbol_names, batch_symbol_values,
batch_weights, batch_mapper)
custom_dif = MyDifferentiator()
custom_grad_expectation = tfq.layers.Expectation(differentiator=custom_dif)
# Now let's get the gradients with finite diff.
with tf.GradientTape() as g:
g.watch(values_tensor)
exact_outputs = expectation_calculation(my_circuit,
operators=[pauli_x],
symbol_names=['alpha'],
symbol_values=values_tensor)
analytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)
# Now let's get the gradients with custom diff.
with tf.GradientTape() as g:
g.watch(values_tensor)
my_outputs = custom_grad_expectation(my_circuit,
operators=[pauli_x],
symbol_names=['alpha'],
symbol_values=values_tensor)
my_gradients = g.gradient(my_outputs, values_tensor)
plt.subplot(1, 2, 1)
plt.title('Exact Gradient')
plt.plot(input_points, analytic_finite_diff_gradients.numpy())
plt.xlabel('x')
plt.ylabel('f(x)')
plt.subplot(1, 2, 2)
plt.title('My Gradient')
plt.plot(input_points, my_gradients.numpy())
plt.xlabel('x')
# Create a noisy sample based expectation op.
expectation_sampled = tfq.get_sampled_expectation_op(
cirq.DensityMatrixSimulator(noise=cirq.depolarize(0.01)))
# Make it differentiable with your differentiator:
# Remember to refresh the differentiator before attaching the new op
custom_dif.refresh()
differentiable_op = custom_dif.generate_differentiable_op(
sampled_op=expectation_sampled)
# Prep op inputs.
circuit_tensor = tfq.convert_to_tensor([my_circuit])
op_tensor = tfq.convert_to_tensor([[pauli_x]])
single_value = tf.convert_to_tensor([[my_alpha]])
num_samples_tensor = tf.convert_to_tensor([[5000]])
with tf.GradientTape() as g:
g.watch(single_value)
forward_output = differentiable_op(circuit_tensor, ['alpha'], single_value,
op_tensor, num_samples_tensor)
my_gradients = g.gradient(forward_output, single_value)
print('---TFQ---')
print('Forward: ', forward_output.numpy())
print('Gradient:', my_gradients.numpy())
print('---Original---')
print('Forward: ', my_expectation(pauli_x, my_alpha))
print('Gradient:', my_grad(pauli_x, my_alpha))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Computing gradients
Step2: Install TensorFlow Quantum:
Step3: Now, import TensorFlow and the module dependencies:
Step4: 1. Preliminaries
Step5: And the observable:
Step7: The operator used is $⟨Y(\alpha)| X | Y(\alpha)⟩ = \sin(\pi \alpha)$
Step8: If you define $f_{1}(\alpha) = ⟨Y(\alpha)| X | Y(\alpha)⟩$, then $f_{1}^{'}(\alpha) = \pi \cos(\pi \alpha)$. See the example below:
Step9: 2. The need for a differentiator
Step10: However, if you instead estimate the expectation value from samples (as on a real device), the values can fluctuate. This means your estimation method is imperfect:
Step11: When it comes to gradients, this quickly compounds into serious accuracy problems:
Step12: Here you can see that although the finite-difference formula is fast at computing the gradients themselves in the analytic case, it produces a great deal of noise when sampling-based methods are involved. More careful techniques must be used to ensure a good gradient can be computed. Next, you will look at a technique that is much slower and not well suited to analytic expectation-gradient calculations, but that performs very well in realistic, sample-based examples:
Step13: From the above you can see that certain differentiators are best suited to particular research scenarios. In general, when testing or implementing algorithms in more "realistic" settings, the slower sample-based methods are more robust to issues such as device noise and are therefore the ideal differentiators. Faster methods such as finite differences are well suited to analytic calculations that need higher throughput, but that do not yet consider whether the algorithm is feasible on a real device.
Step14: If this observable is used with the same circuit as before, then $f_{2}(\alpha) = ⟨Y(\alpha)| Z | Y(\alpha)⟩ = \cos(\pi \alpha)$ and $f_{2}^{'}(\alpha) = -\pi \sin(\pi \alpha)$. A quick check:
Step15: The results match (closely enough).
Step16: Here you can see that the first entry is the expectation with respect to Pauli X, and the second entry is the expectation with respect to Pauli Z. Now the gradient is computed as follows:
Step19: You have now verified that the sum of the gradients of the individual observables is indeed the gradient with respect to $\alpha$. All TensorFlow Quantum differentiators support this behavior, and it plays a crucial role in compatibility with the rest of TensorFlow.
Step20: The Differentiator base class uses the components returned from get_gradient_circuits to compute the derivative, as in the parameter-shift formula shown above. This new differentiator can now be used with existing tfq.layer objects:
Step21: Now this new differentiator can be used to generate differentiable ops.
|
4,728
|
<ASSISTANT_TASK:>
Python Code:
import ga4gh_client.client as client
c = client.HttpClient("http://1kgenomes.ga4gh.org")
counter = 0
for read_group_set in c.search_read_group_sets(dataset_id="WyIxa2dlbm9tZXMiXQ"):
counter += 1
if counter < 4:
print "Read Group Set: {}".format(read_group_set.name)
print "id: {}".format(read_group_set.id)
print "dataset_id: {}".format(read_group_set.dataset_id)
print "Aligned Read Count: {}".format(read_group_set.stats.aligned_read_count)
print "Unaligned Read Count: {}\n".format(read_group_set.stats.unaligned_read_count)
for read_group in read_group_set.read_groups:
print " Read group:"
print " id: {}".format(read_group.id)
print " Name: {}".format(read_group.name)
print " Description: {}".format(read_group.description)
print " Biosample Id: {}\n".format(read_group.bio_sample_id)
else:
break
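# An equivalent way to take just the first few results, using the standard
# library instead of a manual counter (a sketch reusing the same dataset id):
import itertools
for rgs in itertools.islice(c.search_read_group_sets(dataset_id="WyIxa2dlbm9tZXMiXQ"), 3):
    print("Read Group Set: {}".format(rgs.name))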
read_group_set = c.get_read_group_set(read_group_set_id="WyIxa2dlbm9tZXMiLCJyZ3MiLCJOQTE5Njc4Il0")
print "Read Group Set: {}".format(read_group_set.name)
print "id: {}".format(read_group_set.id)
print "dataset_id: {}".format(read_group_set.dataset_id)
print "Aligned Read Count: {}".format(read_group_set.stats.aligned_read_count)
print "Unaligned Read Count: {}\n".format(read_group_set.stats.unaligned_read_count)
for read_group in read_group_set.read_groups:
print " Read Group: {}".format(read_group.name)
print " id: {}".format(read_group.bio_sample_id)
print " bio_sample_id: {}\n".format(read_group.bio_sample_id)
for read_group in read_group_set.read_groups:
print "Alignment from {}\n".format(read_group.name)
alignment = c.search_reads(read_group_ids=[read_group.id], start=0, end=1000000, reference_id="WyJOQ0JJMzciLCIxIl0").next()
print " id: {}".format(alignment.id)
print " fragment_name: {}".format(alignment.fragment_name)
print " aligned_sequence: {}\n".format(alignment.aligned_sequence)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Search read group sets
Step2: Note
Step3: Note: as in the previous example, only a selected subset of parameters is shown for illustration; the data returned by the server is far richer, and this format is used only for a cleaner presentation.
|
4,729
|
<ASSISTANT_TASK:>
Python Code:
import math
if __name__ == '__main__':
    n = 12
    print(math.sqrt(n))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
4,730
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import chap01soln
resp = chap01soln.ReadFemResp()
import thinkstats2
pmf = thinkstats2.Pmf(resp.numkdhh)
pmf
import thinkplot
thinkplot.Pmf(pmf, label='numkdhh')
thinkplot.Show()
def BiasPmf(pmf, label=''):
"""Returns the Pmf with oversampling proportional to value.

If pmf is the distribution of true values, the result is the
distribution that would be seen if values are oversampled in
proportion to their values; for example, if you ask students
how big their classes are, large classes are oversampled in
proportion to their size.

Args:
    pmf: Pmf object.
    label: string label for the new Pmf.

Returns:
    Pmf object
"""
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
biased = BiasPmf(pmf, label='biased')
biased
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, biased])
thinkplot.Show()
pmf.Mean()
biased.Mean()
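# Cross-check via a known identity for length-biased sampling (an added
# check, not from the original notebook): the biased mean should equal
# E[X^2] / E[X] computed under the original PMF.
ex2 = sum(p * x**2 for x, p in pmf.Items())
print(ex2 / pmf.Mean())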
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a PMF of <tt>numkdhh</tt>, the number of children under 18 in the respondent's household.
Step2: Display the PMF.
Step4: Define <tt>BiasPmf</tt>.
Step5: Make the biased Pmf of children in the household, as observed if you surveyed the children instead of the respondents.
Step6: Display the actual Pmf and the biased Pmf on the same axes.
Step7: Compute the means of the two Pmfs.
|
4,731
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import xlrd #With pandas
import matplotlib.pyplot as plt
import pandas as pd
#Exponentiation
print(4 ** 4)
#Types and conversion
mInt = 6
mFloat = .4
mString = "Hey"
mConversion = str(mFloat)
print (mInt, mFloat, mString, mConversion, type(mConversion))
a = "is"
b = "nice"
my_list = [["my", "nested", "list"], a, b]
print (my_list)
print (my_list[-1], " == ", my_list [2])
#Slicing and dicing
x = ["0", "1", "2", "3"]
print (x[1:3]) #the end index is exclusive
print (x[:2])
x = [["a", "b", "c"],
["d", "e", "f"],
["g", "h", "i"]]
print(x[2][0])
print(x[2][:2]) #Intersection of both indices
x = x + [["j", "k", "l"]] #add a sublist with double [[]]
print (x)
# Create a reference and a shallow copy
X_pointer = x #Points to the same list (same memory address)
y = list(x) #y is another list (a shallow copy)
del(x[1])
print (y)
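# Shallow-copy demo (an extra check beyond the original notes): deleting from
# x did not shrink y, but the surviving sublists are still shared objects.
print(x)             # x lost one sublist; y (printed above) kept all of them
print(y[0] is x[0])  # True - list(x) copies only the top level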
z = [11.25, 18.0, 20.0]
# Sort z in descending order: full_sorted
full_sorted = sorted (z, reverse=True)
# Print out full_sorted
print(full_sorted)
#Search index
print(z.index(20.0))
# Print out how often 18.0 appears in z
print (z.count(18.0))
# house list of lists
house = [["hallway", 11.25],
["kitchen", 18.0],
["living room", 20.0],
["bedroom", 10.75],
["bathroom", 9.50]]
# Build a for loop from scratch
for x,y in house:
print("the " + str(x) + " is " + str(y) + " sqm")
#to get the index
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Code the for loop
for index, area in enumerate(areas) :
print("room " + str(index) + ": " + str(area))
# Import the numpy package as np
import numpy as np
# Create list baseball
baseball = [180, 215, 210, 210, 188, 176, 209, 200]
# Create a Numpy array from baseball: np_baseball
np_baseball = np.array(baseball)
print (baseball)
print (np_baseball)
#Perform an operation on all elements
print (np_baseball * 10)
#Apply a condition
print (np_baseball [np_baseball < 200]) #the condition creates a boolean array
#So we can get indexes from one np.array to select values in a second one
#Compare two arrays element by element
my_house = np.array([18.0, 20.0, 10.75, 9.50])
your_house = np.array([14.0, 24.0, 14.25, 9.0])
print(my_house < your_house)
# my_house greater than 18.5 or smaller than 10
print(np.logical_or(my_house > 18.5,
my_house < 10))
#Load from txt file
digits = np.loadtxt(file,
delimiter=',',
skiprows=1 #If the first row is a header
)
#plt.scatter(data_float[:, 0], data_float[:, 1]) # first column is the index, second the values
data = np.genfromtxt('titanic.csv', delimiter=',', names=True, dtype=None) #names=True means the first row holds the column labels
# Create baseball, a list of lists
baseball = [[180, 78.4],
[215, 102.7],
[210, 98.5],
[188, 75.2]]
# Create a 2D Numpy array from baseball: np_baseball
np_baseball = np.array (baseball)
# Print out the shape of np_baseball
print (np_baseball.shape)
# Print out the 4th row of np_baseball
print (np_baseball[3,:])
# Select the entire second column of np_baseball: np_weight
print (np_baseball[:,1])
#Applying element-wise operations between arrays
conversion = np.array([10, 1000])
# Print out product of np_baseball and conversion
print ( np_baseball * conversion)
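# Broadcasting sketch (an added illustration): the 1-D conversion array is
# stretched across each row, so column 0 is scaled by 10 and column 1 by 1000.
print((np_baseball * conversion)[0]) # e.g. [1800., 78400.] for the first row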
# Print out the mean of height
print( np.mean(np_baseball[:,0]))
# Print out the median of weight
print( np.median(np_baseball[:,1]))
#Are these columns correlated?
print(np.corrcoef(np_baseball[:,0],np_baseball[:,1]))
# For loop over np_height
np_height = np.array ([180, 215, 210, 210, 188, 176, 209, 200])
for l in np_height:
print (str(l) + " inches")
# Create a 2D Numpy array from baseball: np_baseball
baseball = [[180, 78.4],
[215, 102.7],
[210, 98.5],
[188, 75.2]]
np_baseball = np.array (baseball)
# For loop over np_baseball: every element, one per line
for l in np.nditer(np_baseball):
print (l)
np.random.seed(123)
# Use randint() to simulate a die
print(np.random.randint(1, 7))
import matplotlib.pyplot as plt
import numpy as np
x = np.array ( [1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016])
y = np.array ( [0.10000000000000001, 0.40000000000000002, 1.0999999999999999, 2.4000000000000008, 7.9999999999999876, 14.499999999999979, 30.700000000000152, 64.600000000000421, 109.19999999999807, 218.9999999999977, 371.79999999999768, 811.90000000000293, 1695.6000000000022, 2789.7999999999979, 4949.3999999999842, 12152.599999999993, 22639.799999999974, 32434.200000000077, 58928.000000000015, 123415.90000000002, 223653.8000000001, 273763.70000000042, 362651.79999999981, 567354.00000000012])
y = y * 1E15
plt.plot(x,y) #Plot line
plt.show()
plt.scatter(x,y) #Plot points
plt.show()
plt.hist(y, bins=20) #Histogram with 20 rectangles
plt.show()
col = {
'1993':'red',
'Europe':'green',
'2014':'blue',
'2015':'yellow',
'Oceania':'black'
}
plt.scatter(x, y,
alpha=.8, #Opacity
s=(x-1990) **2 #Size of bubble: can be an np_array (arbitrary in this example)
#c=col #Change the bubble's color depending on the value (TODO)
)
plt.yscale('log') #Log scale on y
plt.xlabel("Years")
plt.ylabel("Computing power FLOP/S")
plt.title ("Analysis of Top500 performance \n(Sum of the 500 supercomputer's performance)")
plt.grid(True)
#Annotation
plt.text(2005, 1E17, 'These bubbles make no sense!')
#Arrow
texteX1 = 2000
texteY1 = 1E15
flecheX1 = 2000
flecheY1 = 1E16*4
plt.annotate('Nice bubble here',
xy=(flecheX1, flecheY1), xycoords='data',
xytext=(texteX1, texteY1), textcoords='data',
arrowprops=dict(arrowstyle="->",
linewidth = 5.,
color = 'red'),
)
#Set custom min and max values for the x and y axes.
x1,x2,y1,y2 = plt.axis()
plt.axis((1990,x2,y1,y2))
# Definition of tick_val and tick_lab
tick_val = [1E12,1E15,1E18, 1E21]
tick_lab = ['TeraFlops','PetaFlops','ExaFlops', 'ZettaFlops']
plt.yticks(tick_val, tick_lab)
plt.show()
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Add italy to europe
europe['italy'] = 'rome'
# Remove germany
del europe['germany']
# Print out the keys in europe
print(europe.keys())
# Print out value that belongs to key 'norway'
print(europe['norway'])
#Iterate over europe
for key, value in europe.items() :
print("the capital of " + key + " is " + str(value))
# Dictionary of dictionaries
europe = { 'spain': { 'capital':'madrid', 'population':46.77 },
'france': { 'capital':'paris', 'population':66.03 },
'germany': { 'capital':'berlin', 'population':80.62 },
'norway': { 'capital':'oslo', 'population':5.084 } }
# Add data to europe under key 'italy'
data = { 'capital':'rome', 'population':59.83}
europe ['italy'] = data
print (europe)
import pandas as pd
file = "cars.csv"
#read a file
cars = pd.read_csv(file, index_col = 0)
#read and clean
data = pd.read_csv(file,
sep='\t',
comment='#', #Char representing a comment in the data file
na_values=['Nothing'] #Values that we want treated as NaN
)
#First 5 lines (head() defaults to 5 rows)
print (cars.head())
# Assign spreadsheet filename: file
file = "battledeath.xlsx"
# Load spreadsheet: xl
xl = pd.ExcelFile(file)
# Print sheet names
print(xl.sheet_names)
#Select a sheet as a DataFrame
df1 = xl.parse('2002') #By sheet Name
df2 = xl.parse(0) #By sheet Index
# Print out country column as Pandas Series
print (cars['country'], "\n")
# Print out country column as Pandas DataFrame
print (cars[['country']])
# Print out DataFrame with country and drives_right columns
print (cars[['country', 'drives_right']])
# Print out first 3 lines
print(cars[0:3])
# Without Index
print(cars.iloc[0:3].to_string(index=False) )
# Print out observations for Australia and Egypt
print (cars , "\n --- --- ---")
#Row selection by label with [[rows],[columns]]
print (cars.loc[['AUS', 'EG']], "\n --- --- ---")
print(cars.loc['MOR', 'drives_right'] , "\n --- --- ---")
print(cars.loc[['RU', 'MOR'], ['country', 'drives_right']], "\n --- --- ---")
#iloc: select by integer position
print (cars.iloc[:,1], "\n --- --- ---") #All rows, only the second column
# Print out drives_right value of Morocco
print(cars.loc['MOR', 'drives_right'])
# Print sub-DataFrame
print(cars.loc[['RU', 'MOR'], ['country', 'drives_right']])
#print one column
print(cars.loc[:,'drives_right'])
# Print out cars_per_cap and drives_right as DataFrame
print(cars.loc[:, ['cars_per_cap', 'drives_right']])
# Create medium: observations with cars_per_cap between 100 and 500
cpc = cars['cars_per_cap']
between = np.logical_and(cpc > 100, cpc < 500)
medium = cars[between]
print (medium)
# Iterate over rows of cars
for lab, row in cars.iterrows() :
print ("---->" + str(lab))
#add a column
cars.loc[lab, "COUNTRY"] = (cars.loc[lab, "country"]).upper()
#More efficient version with .apply function
cars["smallC"] = cars["country"].apply(str.lower)
print (cars)
#Plotting two plots on the same figure
pp = cars.plot(x='country', y='cars_per_cap', kind='bar')
cars.plot(x='country', y='cars_per_cap', ax=pp) #Draw on the same axes with ax=pp
plt.show()
# Read & print the first 3 lines
with open('cars.csv') as file:
print(file.readline())
import pickle
with open('data.pkl', 'rb') as file: #'rb' because the file is a bytestream, not human-readable
d = pickle.load(file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basics
Step2: Lists
Step3: Loop
Step4: Enumerate
Step5: Numpy
Step6: Importing data command
Step7: Importing a data array with different types
Step8: 2d Numpy Array
Step9: Some statistics tricks
Step10: Loop 1d and 2d
Step11: Matplotlib
Step12: Type of plot
Step13: Options
Step14: Dictionaries
Step15: Loop with .items()
Step16: PANDAS
Step17: Opening Excel files
Step18: DataFrame
Step19: loc and iloc
Step20: multiple selection
Step21: Boolean Selection
Step22: Loop
Step23: Plot Dataframe
Step24: IMPORTING DATA
Step25: Pickle Package
|
4,732
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import mne
from mne.epochs import equalize_epoch_counts
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
event_id = 1 # L auditory
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
event_id = 3 # L visual
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
equalize_epoch_counts([epochs1, epochs2])
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
sample_vertices = [s['vertno'] for s in inverse_operator['src']]
# Let's average and compute inverse, resampling to speed things up
evoked1 = epochs1.average()
evoked1.resample(50, npad='auto')
condition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)
evoked2 = epochs2.average()
evoked2.resample(50, npad='auto')
condition2 = apply_inverse(evoked2, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition1.crop(0, None)
condition2.crop(0, None)
tmin = condition1.tmin
tstep = condition1.tstep * 1000 # convert to milliseconds
n_vertices_sample, n_times = condition1.data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 2) * 10
X[:, :, :, 0] += condition1.data[:, :, np.newaxis]
X[:, :, :, 1] += condition2.data[:, :, np.newaxis]
# Read the source space we are morphing to
src = mne.read_source_spaces(src_fname)
fsave_vertices = [s['vertno'] for s in src]
morph_mat = mne.compute_source_morph(
src=inverse_operator['src'], subject_to='fsaverage',
spacing=fsave_vertices, subjects_dir=subjects_dir).morph_mat
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 2)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)
X = np.abs(X) # only magnitude
X = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast
print('Computing connectivity.')
connectivity = mne.spatial_src_connectivity(src)
# Note that X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions
X = np.transpose(X, [2, 1, 0])
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.001
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
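# For reference (a derived figure, not printed in the original example): with
# n_subjects = 7 and a two-tailed p of 0.001, this cluster-forming threshold
# works out to roughly t = 5.96.
print('Cluster-forming threshold: t = %.3f' % t_threshold)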
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=1,
threshold=t_threshold, buffer_size=None,
verbose=True)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot(
hemi='both', views='lateral', subjects_dir=subjects_dir,
time_label='temporal extent (ms)', size=(800, 800),
smoothing_steps=5, clim=dict(kind='value', pos_lims=[0, 1, 40]))
# brain.save_image('clusters.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Read epochs for all channels, removing a bad one
Step3: Transform to source space
Step4: Transform to common cortical space
Step5: It's a good idea to spatially smooth the data, and for visualization
Step6: Finally, we want to compare the overall activity levels in each condition,
Step7: Compute statistic
Step8: Visualize the clusters
|
4,733
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import YouTubeVideo
# Title: Max Planck Solves the Ultraviolet Catastrophe for Blackbody Radiation | Doc Physics
# Author: Doc Schuster
YouTubeVideo('H-7f-3OAXm0')
%%latex
\begin{aligned}
B_{\lambda}(\lambda, T) = \frac{2hc^2}{\lambda^5} \frac{1}{e^{\frac{hc}{\lambda k_B T}} - 1}
\end{aligned}
import numpy as np
def planck(wavelength, temp):
"""Return the emitted radiation from a blackbody of a given temp and wavelength.

Args:
    wavelength (float): wavelength (m)
    temp (float): temperature of black body (Kelvin)

Returns:
    float: spectral radiance (W / (sr m^3))
"""
k_b = 1.3806488e-23 # J/K Boltzmann constant
h = 6.626070040e-34 # J s - Planck's constant
c = 3e8 # m/s - speed of light
return ((2 * h * c ** 2) / wavelength ** 5 *
1 / (np.exp(h * c / (wavelength * k_b * temp)) - 1))
for temp in (3000, 4000, 5000):
rad = planck(0.5e-6, temp)
rad_kW_per_sr_m2_nm = rad / 1e3 / 1e9 # convert from W to kW and m to nm
print('%.3f K: %.5f kW/(sr m^2 nm)' % (temp, rad_kW_per_sr_m2_nm))
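# Sanity check via Wien's displacement law (the constant b ~ 2.898e-3 m*K is
# standard physics, not taken from this notebook): the emission peak should
# move to shorter wavelengths as the temperature rises.
for t in (3000, 4000, 5000):
    print('%d K peaks near %.3f micrometers' % (t, 2.898e-3 / t * 1e6))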
# Import and alias to "plt"
import matplotlib.pyplot as plt
# Calculate
wavelength = np.linspace(1e-7, 3e-6, 1000)
temp = np.array([3000, 4000, 5000])
rad = np.zeros((wavelength.size, temp.size), dtype=float)
for i, t in enumerate(temp):
rad[:, i] = planck(wavelength, t)
%matplotlib nbagg
# Plot
text_x = wavelength[rad.argmax(axis=0)] * 1e6
text_y = rad.max(axis=0) / 1e3 / 1e9
temp_str = ['%.2f K' % t for t in temp]
fig, ax = plt.subplots()
ax.plot(wavelength * 1e6, rad / 1e3 / 1e9)
for _x, _y, _temp in zip(text_x, text_y, temp_str):
ax.text(_x, _y, _temp, ha='center')
plt.legend(labels=['%.2f K' % t for t in temp])
plt.xlabel(r'Wavelength ($\mu m$)')
plt.ylabel(r'Spectral radiance ($kW \cdot sr^{-1} \cdot m^{-2} \cdot nm^{-1}$)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Equations
Step3: You can also use "line magics" to write LaTeX inline in Markdown cells
Step4: 5. Visualization
|
4,734
|
<ASSISTANT_TASK:>
Python Code:
import os, sys
sys.path.append(os.path.abspath('../../main/python'))
from thalesians.tsa.simulation import xtimes, times
for t in xtimes(0, 5): print(t)
xtimes(0, 5)
list(xtimes(0, 5))
times(0, 5)
list(range(0, 5))
ts = []
for t in xtimes(start=1):
ts.append(t)
if len(ts) == 5: break
ts
times(start=-3., stop=5., step=2.5)
import datetime as dt
times(dt.date(2017, 5, 5), dt.date(2017, 5, 10))
times(dt.time(8), dt.time(12), dt.timedelta(minutes=30))
times(dt.datetime(2017, 5, 10), dt.datetime(2017, 5, 5), dt.timedelta(days=-1))
import thalesians.tsa.randomness as rnd
times(0., 10., step=lambda x: rnd.exponential(2.5))
times(dt.datetime(2017, 5, 5, 8),
dt.datetime(2017, 5, 5, 12),
lambda x: rnd.exponential(dt.timedelta(minutes=30)))
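# A small sanity check (added here, not in the original text): with
# exponential(2.5) inter-arrival times, a window of length 10 should contain
# about 10 / 2.5 = 4 events on average.
ts = times(0., 10., step=lambda x: rnd.exponential(2.5))
print(len(ts) - 1) # number of steps taken; typically around 4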
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: xtimes
Step2: Since xtimes is a generator, the times are computed lazily
Step3: To get hold of them all at once, you need to use something like
Step4: or the shortcut
Step5: which amounts to the same thing. So far, we haven't seen anything that would justify xtimes's existence
Step6: In order to understand why we may need xtimes and not range we need to delve deeper into the semantics of xtimes.
Step7: Perhaps more importantly, start and stop don't have to be ints. They may be floats, dates, times, datetimes. Respectively, stop may be an int, float, or a timedelta, for example
Step8: (By default, the step is 1, 1., or timedelta(days=1), depending on the types of start and stop.)
Step9: The flexibility of xtimes/times enables one to represent time using a data type that is suitable for the particular simulation and/or modelling task.
Step10: (Recall that the lengths of times between the occurrences of a Poisson process are exponentially distributed.)
|
4,735
|
<ASSISTANT_TASK:>
Python Code:
from pymatgen.ext.matproj import MPRester
from pymatgen.electronic_structure.core import Spin
#This initializes the REST connection to the Materials Project db. Put your own API key if needed.
a = MPRester()
#load the band structure from mp-3748, CuAlO2 from the MP db
bs = a.get_bandstructure_by_material_id("mp-3748")
#is the material a metal (i.e., does the Fermi level cross a band)?
print(bs.is_metal())
#print information on the band gap
print(bs.get_band_gap())
#print the energy of the 20th band and 10th kpoint
print(bs.bands[Spin.up][20][10])
#print energy of direct band gap
print(bs.get_direct_band_gap())
#print information on the vbm
print(bs.get_vbm())
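#for symmetry, the conduction band minimum is available the same way
#(get_cbm() is part of the same pymatgen BandStructure API):
print(bs.get_cbm())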
%matplotlib inline
from pymatgen.electronic_structure.plotter import BSPlotter
plotter = BSPlotter(bs)
plotter.get_plot().show()
plotter.plot_brillouin()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We print some information about the band structure
Step2: Here, we plot the bs object. By default, for an insulator the energy window runs from the VBM - 4 eV to the CBM + 4 eV
Step3: We plot the Brillouin zone with the path which was used for the band structure
|
4,736
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
# If our large test file is available, use it. Otherwise, use file generated
# from toy_mstis_2_run.ipynb. This is so the notebook can be used in testing.
import os
test_file = "../toy_mstis_1k_OPS1.nc"
filename = test_file if os.path.isfile(test_file) else "mstis.nc"
print("Using file `"+ filename + "` for analysis")
%matplotlib inline
import matplotlib.pyplot as plt
import openpathsampling as paths
import numpy as np
%%time
storage = paths.Storage(filename, mode='r')
# the following works with the old file we use in testing; the better way is:
# mstis = storage.networks['mstis'] # when objects are named, use the name
mstis = storage.networks.first
mstis.hist_args['max_lambda'] = { 'bin_width' : 0.05, 'bin_range' : (0.0, 0.5) }
mstis.hist_args['pathlength'] = { 'bin_width' : 5, 'bin_range' : (0, 150) }
%%time
mstis.rate_matrix(storage.steps)
stateA = storage.volumes["A"]
stateB = storage.volumes["B"]
stateC = storage.volumes["C"]
tcp_AB = mstis.transitions[(stateA, stateB)].tcp
tcp_AC = mstis.transitions[(stateA, stateC)].tcp
tcp_BC = mstis.transitions[(stateB, stateC)].tcp
tcp_BA = mstis.transitions[(stateB, stateA)].tcp
tcp_CA = mstis.transitions[(stateC, stateA)].tcp
tcp_CB = mstis.transitions[(stateC, stateB)].tcp
plt.plot(tcp_AB.x, tcp_AB, '-r')
plt.plot(tcp_CA.x, tcp_CA, '-k')
plt.plot(tcp_BC.x, tcp_BC, '-b')
plt.plot(tcp_AC.x, tcp_AC, '-g') # same as tcp_AB in MSTIS
plt.plot(tcp_AB.x, np.log(tcp_AB), '-r')
plt.plot(tcp_CA.x, np.log(tcp_CA), '-k')
plt.plot(tcp_BC.x, np.log(tcp_BC), '-b')
plt.xlim(0.0, 1.0)
# TODO: MOVE THESE TO A METHOD INSIDE THE CODE; MAKE THEM WORK WITH NEW ANALYSIS
import pandas as pd
def crossing_probability_table(transition):
tcp = transition.tcp
interface_lambdas = transition.interfaces.lambdas
values = [tcp(x) for x in interface_lambdas]
return pd.Series(values, index=interface_lambdas, name=transition.name)
def outer_crossing_probability(transition):
tcp = transition.tcp
interface_outer_lambda = transition.interfaces.lambdas[-1]
return tcp(interface_outer_lambda)
crossing_probability_table(mstis.from_state[stateA])
outer_crossing_probability(mstis.from_state[stateA])
tcp_AB(mstis.from_state[stateA].interfaces.lambdas[-1])
tcp_A = mstis.from_state[stateA].tcp
flux_dict = {
(transition.stateA, transition.interfaces[0]): transition._flux
for transition in mstis.transitions.values()
}
flux_dict
paths.analysis.tis.flux_matrix_pd(flux_dict)
state_names = [s.name for s in mstis.states]
outer_ctp_matrix = pd.DataFrame(columns=state_names, index=state_names)
for state_pair in mstis.transitions:
transition = mstis.transitions[state_pair]
outer_ctp_matrix.at[state_pair[0].name, state_pair[1].name] = transition.ctp[transition.ensembles[-1]]
outer_ctp_matrix
state_pair_names = {t: "{} => {}".format(t[0].name, t[1].name) for t in mstis.transitions}
ctp_by_interface = pd.DataFrame(index=state_pair_names.values())
for state_pair in mstis.transitions:
transition = mstis.transitions[state_pair]
for ensemble_i in range(len(transition.ensembles)):
state_pair_name = state_pair_names[transition.stateA, transition.stateB]
ctp_by_interface.at[state_pair_name, ensemble_i] = transition.conditional_transition_probability(
storage.steps,
transition.ensembles[ensemble_i]
)
ctp_by_interface
hists_A = mstis.transitions[(stateA, stateB)].histograms
hists_B = mstis.transitions[(stateB, stateC)].histograms
hists_C = mstis.transitions[(stateC, stateB)].histograms
hists = {'A': hists_A, 'B': hists_B, 'C': hists_C}
plot_style = {'A': '-r', 'B': '-b', 'C': '-k'}
for hist in [hists_A, hists_B, hists_C]:
for ens in hist['max_lambda']:
normalized = hist['max_lambda'][ens].normalized()
plt.plot(normalized.x, normalized)
# add visualization of the sum
for hist_type in hists:
hist = hists[hist_type]
for ens in hist['max_lambda']:
reverse_cumulative = hist['max_lambda'][ens].reverse_cumulative()
plt.plot(reverse_cumulative.x, reverse_cumulative, plot_style[hist_type])
plt.xlim(0.0, 1.0)
for hist_type in hists:
hist = hists[hist_type]
for ens in hist['max_lambda']:
reverse_cumulative = hist['max_lambda'][ens].reverse_cumulative()
plt.plot(reverse_cumulative.x, np.log(reverse_cumulative), plot_style[hist_type])
plt.xlim(0.0, 1.0)
for hist in [hists_A, hists_B, hists_C]:
for ens in hist['pathlength']:
normalized = hist['pathlength'][ens].normalized()
plt.plot(normalized.x, normalized)
for ens in hists_A['pathlength']:
normalized = hists_A['pathlength'][ens].normalized()
plt.plot(normalized.x, normalized)
scheme = storage.schemes[0]
scheme.move_summary(storage.steps)
scheme.move_summary(movers='shooting')
scheme.move_summary(movers='minus')
scheme.move_summary(movers='repex')
scheme.move_summary(movers='pathreversal')
repx_net = paths.ReplicaNetwork(scheme, storage.steps)
repx_net.mixing_matrix()
repxG = paths.ReplicaNetworkGraph(repx_net)
repxG.draw('spring')
import openpathsampling.visualize as vis
from IPython.display import SVG
tree = vis.PathTree(
storage.steps[0:500],
vis.ReplicaEvolution(replica=2, accepted=True)
)
SVG(tree.svg())
decorrelated = tree.generator.decorrelated
print("We have " + str(len(decorrelated)) + " decorrelated trajectories.")
# we use the %run magic because this isn't in a package
%run ../resources/toy_plot_helpers.py
background = ToyPlot()
background.contour_range = np.arange(-1.5, 1.0, 0.1)
background.add_pes(storage.engines[0].pes)
xval = paths.FunctionCV("xval", lambda snap : snap.xyz[0][0])
yval = paths.FunctionCV("yval", lambda snap : snap.xyz[0][1])
visualizer = paths.StepVisualizer2D(mstis, xval, yval, [-1.0, 1.0], [-1.0, 1.0])
visualizer.background = background.plot()
visualizer.draw_samples(list(tree.samples))
# NBVAL_SKIP
# The skip directive tells our test runner not to run this cell
import time
max_step = 10
for step in storage.steps[0:max_step]:
visualizer.draw_ipynb(step)
time.sleep(0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reaction rates
Step2: The self-rates (the rate of returning to the initial state) are undefined, and return not-a-number.
Step3: We normally look at these on a log scale
Step4: Now, in case you want to know the total crossing probability at each interface (for example, to use as a bias in an SRTIS calculation)
Step5: Flux
Step6: Conditional transition probability
Step7: Path ensemble properties
Step8: Interface crossing probabilities
Step9: Path length histograms
Step10: Sampling properties
Step11: Replica exchange sampling
Step12: Replica exchange mixing matrix
Step13: Replica exchange graph
Step14: Replica exchange flow
Step15: Visualizing trajectories
|
4,737
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Generate observations on the interval [0, 2)
x1 = np.random.uniform(low=0.0, high=2.0, size=100)
x2 = np.random.uniform(low=0.0, high=2.0, size=100)
X = np.matrix([x1, x2]).T
# Assign class labels based on the decision surface 2x1^3 + 3x2 = 5
y = np.where(((2 * (x1 ** 3)) + (3 * x2) >= 5), 1, -1)
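# Quick sanity check (not part of the original exercise): confirm both
# classes are actually represented before fitting anything.
print(np.unique(y, return_counts=True))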
# Plot the decision boundary
dec_x = np.arange(0, 2, 0.1)
dec_y = (5 - (2 *(dec_x ** 3))) / 3
plt.plot(dec_x, dec_y)
# Plot the samples
markers = ('x', 'o')
for idx, cl in enumerate(np.unique(y)):
plt.scatter(X[y == cl, 0], X[y == cl, 1],
marker=markers[idx], label=cl)
plt.xlim(0.0, 2.0)
plt.xlabel('X1')
plt.ylim(0.0, 2.0)
plt.ylabel('X2')
plt.legend(loc='upper left')
plt.show()
plt.close()
from sklearn import svm
# Instantiate the three classifiers
linear = svm.SVC(kernel='linear')
radial = svm.SVC(kernel='rbf')
poly_2 = svm.SVC(kernel='poly', degree=2)
poly_3 = svm.SVC(kernel='poly', degree=3)
classifiers = [linear, radial, poly_2, poly_3]
names = ['linear', 'radial', 'degree 2 polynomial', 'degree 3 polynomial']
# Fit classifiers to the training data and calculate accuracy
for name, classifier in zip(names, classifiers):
classifier.fit(X, y)
score = classifier.score(X, y)
print('''The {name} classifier has a mean accuracy of {score} \
on the training data.'''.format(name=name, score=str(int(score*100)) + '%'))
print()
print('''Hence, the polynomial and radial kernels outperform the linear classifier.''')
# Generate observations on the interval [0, 2)
x1_test = np.random.uniform(low=0.0, high=2.0, size=1000)
x2_test = np.random.uniform(low=0.0, high=2.0, size=1000)
X_test = np.matrix([x1_test, x2_test]).T
# Assign class labels based on the decision surface 2x1^3 + 3x2 = 5
y_test = np.where(((2 * (x1_test ** 3)) + (3 * x2_test) >= 5), 1, -1)
for name, classifier in zip(names, classifiers):
# Predict training and test data
pred_train = classifier.predict(X)
pred_test = classifier.predict(X_test)
# Calculate error rates for training and test data
err_train = np.sum(np.where(pred_train != y, 1, 0)) / len(pred_train)
err_test = np.sum(np.where(pred_test != y_test, 1, 0)) / len(pred_test)
# Plot results
plt.plot(dec_x, dec_y, label='actual boundary')
markers = ('x', 'o')
for idx, cl in enumerate(np.unique(y_test)):
plt.scatter(X_test[pred_test == cl, 0], X_test[pred_test == cl, 1],
marker=markers[idx], label=cl)
plt.xlim(0.0, 2.0)
plt.xlabel('X1')
plt.ylim(0.0, 2.0)
plt.ylabel('X2')
plt.legend(loc='upper left')
plt.title('SVM with a {name} classifier'.format(name=name))
plt.show()
plt.close()
# Print results
print('''Training error for the {name} classifier: {err}'''.format(
name=name, err=str(err_train*100) + '%'))
print('''Testing error for the {name} classifier: {err}'''.format(
name=name, err=str(err_test*100) + '%'))
# Generate random training data
x1 = np.random.uniform(low=0.0, high=10.0, size=500)
x2 = np.random.uniform(low=0.0, high=10.0, size=500)
X = np.matrix([x1, x2]).T
y = np.where(0.15 * (x1 ** 2) - x2 > 0, 1, 0)
import math
# Plot the decision boundary
dec_x = np.arange(0, 10, 0.1)
dec_y = 0.15 * (dec_x ** 2)
plt.plot(dec_x, dec_y)
# Plot the samples
markers = ('x', 'o')
for idx, cl in enumerate(np.unique(y)):
plt.scatter(X[y == cl, 0], X[y == cl, 1],
marker=markers[idx], label=cl)
plt.xlim(0.0, 10.0)
plt.xlabel('X1')
plt.ylim(0.0, 10.0)
plt.ylabel('X2')
plt.legend(loc='upper left')
plt.show()
plt.close()
from sklearn.linear_model import LogisticRegression
logistic = LogisticRegression()
logistic.fit(X, y)
y_pred = logistic.predict(X)
# Plot the decision boundary
dec_x = np.arange(0, 10, 0.1)
dec_y = 0.15 * (dec_x ** 2)
plt.plot(dec_x, dec_y)
# Plot the samples
markers = ('x', 'o')
for idx, cl in enumerate(np.unique(y_pred)):
plt.scatter(X[y_pred == cl, 0], X[y_pred == cl, 1],
marker=markers[idx], label=cl)
plt.xlim(0.0, 10.0)
plt.xlabel('X1')
plt.ylim(0.0, 10.0)
plt.ylabel('X2')
plt.legend(loc='upper left')
plt.show()
plt.close()
logistic_sqd = LogisticRegression()
X_sqd = np.matrix([0.15 * (x1**2), x2]).T
logistic_sqd.fit(X_sqd, y)
y_pred = logistic_sqd.predict(X_sqd)
plt.plot(dec_x, dec_y)
for idx, cl in enumerate(np.unique(y_pred)):
plt.scatter(X[y_pred == cl, 0], X[y_pred == cl, 1],
marker=markers[idx], label=cl)
plt.xlim(0.0, 10.0)
plt.xlabel('X1')
plt.ylim(0.0, 10.0)
plt.ylabel('X2')
plt.legend(loc='upper left')
plt.title('Logistic regression with nonlinear features')
plt.show()
plt.close()
svc = svm.SVC(kernel='linear')
svc.fit(X, y)
y_pred = svc.predict(X)
plt.plot(dec_x, dec_y)
for idx, cl in enumerate(np.unique(y_pred)):
plt.scatter(X[y_pred == cl, 0], X[y_pred == cl, 1],
marker=markers[idx], label=cl)
plt.xlim(0.0, 10.0)
plt.xlabel('X1')
plt.ylim(0.0, 10.0)
plt.ylabel('X2')
plt.legend(loc='upper left')
plt.title('Kernel SVM')
plt.show()
plt.close()
svc = svm.SVC(kernel='rbf')
svc.fit(X, y)
y_pred = svc.predict(X)
plt.plot(dec_x, dec_y)
for idx, cl in enumerate(np.unique(y_pred)):
plt.scatter(X[y_pred == cl, 0], X[y_pred == cl, 1],
marker=markers[idx], label=cl)
plt.xlim(0.0, 10.0)
plt.xlabel('X1')
plt.ylim(0.0, 10.0)
plt.ylabel('X2')
plt.legend(loc='upper left')
plt.title('Kernel SVM')
plt.show()
plt.close()
def gini(px):
return (px * (1 - px)) + ((1 - px) * (1 - (1 - px)))
def entropy(px):
return -(px * np.log2(px)) - ((1 - px) * np.log2(1 - px))
def class_error(px):
return 1 - max([px, 1 - px])
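# Worked check of the three criteria (values computed by hand as an added
# verification): at px = 0.5 all three measures peak.
assert gini(0.5) == 0.5 # 0.5*0.5 + 0.5*0.5
assert entropy(0.5) == 1.0 # one full bit of uncertainty
assert class_error(0.5) == 0.5 # the majority vote is wrong half the time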
x = np.arange(0, 1, 0.01)
classes = np.unique(y)
I_G = [gini(px) for px in x]
I_H = [entropy(px) if px != 0 else None for px in x]
I_E = [class_error(px) for px in x]
plt.plot(x, I_G, label='Gini')
plt.plot(x, I_H, label='Entropy')
plt.plot(x, I_E, label='Classification error')
plt.legend(loc='upper center', ncol=3, bbox_to_anchor=(0.5, 1.15))
plt.xlabel('Proportion of samples of a given class')
plt.ylabel('Impurity')
plt.show()
plt.close()
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
breast_cancer = load_breast_cancer()
data, target = breast_cancer.data, breast_cancer.target
X_train, X_test, y_train, y_test = train_test_split(
data, target, test_size=0.2, random_state=0)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train)
score = accuracy_score(dtree.predict(X_train), y_train)
print('The accuracy of the decision tree is {score} on the training set.'.format(score=score))
print(dtree)
from sklearn.metrics import confusion_matrix
pred = dtree.predict(X_test)
confusion = confusion_matrix(y_test, pred) # rows: true labels, columns: predictions
accuracy = round(accuracy_score(pred, y_test), 3)
print('The classifier has an accuracy of {score} on the test data.'.format(score=accuracy))
print('Confusion matrix:')
print()
print(confusion)
scores = []
for i in range(1, 10):
cv_tree = DecisionTreeClassifier(max_depth=i)
cv_tree.fit(X_train, y_train)
scores.append(accuracy_score(cv_tree.predict(X_test), y_test))
highest_score = (0, 0)
for index, score in enumerate(scores):
if score > highest_score[1]:
highest_score = (index+1, score)
output = 'The highest score, {score}, occurs in a tree with node size {size}.'
print(output.format(score=round(highest_score[1], 3), size=highest_score[0]))
plt.plot(np.arange(1,10), scores)
plt.xlabel('Tree size')
plt.ylabel('Accuracy score')
plt.title('How big should our decision tree be?')
plt.show()
plt.close()
optimal_tree = DecisionTreeClassifier(max_depth=3)
optimal_tree.fit(X_train, y_train)
prune_acc_train = round(accuracy_score(optimal_tree.predict(X_train), y_train), 3)
noprune_acc_train = round(accuracy_score(dtree.predict(X_train), y_train), 3)
print('The pruned tree has an accuracy score of {score} on training data.'.format(score=prune_acc_train))
print('The un-pruned tree has an accuracy score of {score} on training data.'.format(score=noprune_acc_train))
prune_acc_test = round(accuracy_score(optimal_tree.predict(X_test), y_test), 3)
noprune_acc_test = round(accuracy_score(dtree.predict(X_test), y_test), 3)
print('The pruned tree has an accuracy score of {score} on test data.'.format(score=prune_acc_test))
print('The un-pruned tree has an accuracy score of {score} on test data.'.format(score=noprune_acc_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (b) Show that in this setting, a support vector machine with a polynomial kernel (with degree greater than 1) or a radial kernel will outperform a support vector classifier on the training data.
Step2: (c) Generate 1000 test observations through the same method that you used in (a).
Step3: (d) Which technique performs best on the test data? Make plots and report training and test error rates in order to back up your assertions.
Step4: As we can see in the error reports and plots above, the radial and polynomial classifiers perform equally well on the test data within 0.5% error. The polynomial classifier with degree 3 performs the best in most iterations of this experiment, as we would expect given the actual underlying decision boundary.
Step5: (b) Plot the observations, colored according to their class labels. Your plot should display X1 on the x-axis, and X2 on the y-axis.
Step6: (c) Fit a logistic regression model to the data, using X1 and X2 as predictors.
Step7: (d) Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be linear.
Step8: (e) Now fit a logistic regression model to the data using non-linear functions of X1 and X2 as predictors (e.g. $X_1^2$, $X_1 \times X_2$, $\log(X_2)$, and so forth).
Step9: (f) Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be obviously non-linear. If it is not, then repeat (a)-(e) until you come up with an example in which the predicted class labels are obviously non-linear.
Step10: (g) Fit a support vector classifier to the data with X1 and X2 as predictors. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels.
Step11: (h) Fit a SVM using a non-linear kernel to the data. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels.
Step12: (i) Comment on your results.
Step13: This problem tests your ability to train decision trees and reason about their effectiveness. It uses the built-in breast cancer dataset that ships with scikit-learn. You can import this dataset through the module method sklearn.datasets.load_breast_cancer.
Step14: (b) Fit a decision tree to the training data, with the diagnosis as the response and all other variables as predictors. Produce summary statistics about the tree and describe the results obtained. What is the training error rate?
Step15: (c) Type in the name of the tree object in order to get a detailed text output.
Step16: (d) Predict the response on the test data, and produce a confusion matrix comparing the test labels to the predicted test labels. What is the test error rate?
Step17: (e) Apply a cross-validation function to the training set in order to determine the optimal tree size.
Step18: (f) Produce a plot with tree size on the x-axis and cross-validated classification error rate on the y-axis.
Step19: (g) Which tree size corresponds to the lowest cross-validated classification error rate?
Step20: (i) Compare the training error rates between the pruned and un-pruned trees. Which is higher?
Step21: (j) Compare the test error rates between the pruned and unpruned trees. Which is higher?
|
4,738
|
<ASSISTANT_TASK:>
Python Code:
import math as math
def ones_to_words(n):
onesdict = {0: "",
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
}
return onesdict[n]
def teens_to_words(n):
teendict = {10: "ten",
11: "eleven",
12: "twelve",
13: "thirteen",
14: "fourteen",
15: "fifteen",
16: "sixteen",
17: "seventeen",
18: "eighteen",
19: "nineteen",
}
return teendict[n]
def tens_to_words(n):
tensdict = {2: "twenty",
3: "thirty",
4: "forty",
5: "fifty",
6: "sixty",
7: "seventy",
8: "eighty",
9: "ninety",
}
return tensdict[n]
def number_to_words(n):
Given a number n between 1-1000 inclusive return a list of words for the number.
cent = n // 100
tens = int(n % 100) // 10
ones = int(n % 10)
words = ""
if cent > 0:
# hundreds
if cent == 10:
words += "one thousand"
else:
words += (ones_to_words(cent) + " hundred ")
# tens and ones
if tens == 0:
if ones == 0:
return words
else:
words += "and " + ones_to_words(ones)
elif tens == 1:
words += "and " + teens_to_words(10 * tens + ones)
else:
words += "and " + tens_to_words(tens) + "-" + ones_to_words(ones)
else:
# tens and ones
if tens == 0:
words += ones_to_words(ones)
elif tens == 1:
words += teens_to_words(10 * tens + ones)
else:
words += tens_to_words(tens) + "-" + ones_to_words(ones)
return words
#raise NotImplementedError()
assert number_to_words(4) == "four"
assert number_to_words(58) == "fifty-eight"
assert number_to_words(409) == "four hundred and nine"
assert number_to_words(1000) == "one thousand"
assert number_to_words(712) == "seven hundred and twelve"
#raise NotImplementedError()
assert True # use this for grading the number_to_words tests.
def count_letters(n):
Count the number of letters used to write out the words for 1-n inclusive.
x = number_to_words(n)
x = x.replace("-", " ")
return sum([len(y) for y in x.split(" ")])
#raise NotImplementedError()
assert count_letters(4) == 4
assert count_letters(58) == 10
assert count_letters(409) == 18
assert count_letters(1000) == 11
assert count_letters(712) == 21
#raise NotImplementedError()
assert True # use this for grading the count_letters tests.
total_letters = 0
for i in range(1, 1001):
total_letters += count_letters(i)
print(total_letters)
#raise NotImplementedError()
assert True # use this for gradig the answer to the original question.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Project Euler
Step2: Now write a set of assert tests for your number_to_words function that verifies that it is working as expected.
Step4: Now define a count_letters(n) that returns the number of letters used to write out the words for all of the the numbers 1 to n inclusive.
Step5: Now write a set of assert tests for your count_letters function that verifies that it is working as expected.
Step6: Finally used your count_letters function to solve the original question.
|
4,739
|
<ASSISTANT_TASK:>
Python Code:
from collections import namedtuple
Subscriber = namedtuple("Subscriber", ["addr", "joined"])
sub = Subscriber("jonesy@example.com", "2012-10-19")
sub
sub.addr
sub.joined
len(sub)
addr, joined = sub
addr
joined
def compute_cost(records):
total = 0.0
for rec in records:
total += rec[1] * rec[2]
return total
from collections import namedtuple
Stock = namedtuple("Stock", ["name", "shares", "price"])
def compute_cost(records):
total = 0.0
for rec in records:
s = Stock(*rec)
total += s.shares * s.price
return total
s = Stock("ACME", 100, 123.45)
s
s.shares
s.shares = 75
s = s._replace(shares = 75)
s
from collections import namedtuple
Stock = namedtuple("Stock", ["name", "shares", "price", "date", "time"])
# Create a prototype instance
stock_prototype = Stock("", 0, 0.0, None, None)
# Function to convert a dictionary to a Stock
def dict_to_stock(s):
return stock_prototype._replace(**s)
a = {"name": "ACME", "shares": 100, "price": 123.45}
dict_to_stock(a)
b = {'name': 'ACME', 'shares': 100, 'price': 123.45, 'date': '12/17/2012'}
dict_to_stock(b)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 尽管 namedtuple 的实例看起来像一个普通的类实例,但是它跟元组类型是可交换的,支持所有的普通元组操作,比如索引和解压。 比如:
Step2: 命名元组的一个主要用途是将你的代码从下标操作中解脱出来。 因此,如果你从数据库调用中返回了一个很大的元组列表,通过下标去操作其中的元素, 当你在表中添加了新的列的时候你的代码可能就会出错了。但是如果你使用了命名元组,那么就不会有这样的顾虑。
Step3: 下标操作通常会让代码表意不清晰,并且非常依赖记录的结构。 下面是使用命名元组的版本:
Step4: 讨论
Step5: 如果你真的需要改变然后的属性,那么可以使用命名元组实例的 _replace() 方法, 它会创建一个全新的命名元组并将对应的字段用新的值取代。比如:
Step6: _replace() 方法还有一个很有用的特性就是当你的命名元组拥有可选或者缺失字段时候, 它是一个非常方便的填充数据的方法。 你可以先创建一个包含缺省值的原型元组,然后使用 _replace() 方法创建新的值被更新过的实例。比如:
Step7: 下面是它的使用方法:
|
4,740
|
<ASSISTANT_TASK:>
Python Code:
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.poutine.runtime import effectful
pyro.set_rng_seed(101)
def scale(guess):
weight = pyro.sample("weight", dist.Normal(guess, 1.0))
return pyro.sample("measurement", dist.Normal(weight, 0.75))
def make_log_joint(model):
def _log_joint(cond_data, *args, **kwargs):
conditioned_model = poutine.condition(model, data=cond_data)
trace = poutine.trace(conditioned_model).get_trace(*args, **kwargs)
return trace.log_prob_sum()
return _log_joint
scale_log_joint = make_log_joint(scale)
print(scale_log_joint({"measurement": 9.5, "weight": 8.23}, 8.5))
from pyro.poutine.trace_messenger import TraceMessenger
from pyro.poutine.condition_messenger import ConditionMessenger
def make_log_joint_2(model):
def _log_joint(cond_data, *args, **kwargs):
with TraceMessenger() as tracer:
with ConditionMessenger(data=cond_data):
model(*args, **kwargs)
trace = tracer.trace
logp = 0.
for name, node in trace.nodes.items():
if node["type"] == "sample":
if node["is_observed"]:
assert node["value"] is cond_data[name]
logp = logp + node["fn"].log_prob(node["value"]).sum()
return logp
return _log_joint
scale_log_joint = make_log_joint_2(scale)
print(scale_log_joint({"measurement": 9.5, "weight": 8.23}, 8.5))
class LogJointMessenger(poutine.messenger.Messenger):
def __init__(self, cond_data):
self.data = cond_data
# __call__ is syntactic sugar for using Messengers as higher-order functions.
# Messenger already defines __call__, but we re-define it here
# for exposition and to change the return value:
def __call__(self, fn):
def _fn(*args, **kwargs):
with self:
fn(*args, **kwargs)
return self.logp.clone()
return _fn
def __enter__(self):
self.logp = torch.tensor(0.)
# All Messenger subclasses must call the base Messenger.__enter__()
# in their __enter__ methods
return super().__enter__()
# __exit__ takes the same arguments in all Python context managers
def __exit__(self, exc_type, exc_value, traceback):
self.logp = torch.tensor(0.)
# All Messenger subclasses must call the base Messenger.__exit__ method
# in their __exit__ methods.
return super().__exit__(exc_type, exc_value, traceback)
# _pyro_sample will be called once per pyro.sample site.
# It takes a dictionary msg containing the name, distribution,
# observation or sample value, and other metadata from the sample site.
def _pyro_sample(self, msg):
# Any unobserved random variables will trigger this assertion.
# In the next section, we'll learn how to also handle sampled values.
assert msg["name"] in self.data
msg["value"] = self.data[msg["name"]]
# Since we've observed a value for this site, we set the "is_observed" flag to True
# This tells any other Messengers not to overwrite msg["value"] with a sample.
msg["is_observed"] = True
self.logp = self.logp + (msg["scale"] * msg["fn"].log_prob(msg["value"])).sum()
with LogJointMessenger(cond_data={"measurement": 9.5, "weight": 8.23}) as m:
scale(8.5)
print(m.logp.clone())
scale_log_joint = LogJointMessenger(cond_data={"measurement": 9.5, "weight": 8.23})(scale)
print(scale_log_joint(8.5))
def log_joint(model=None, cond_data=None):
msngr = LogJointMessenger(cond_data=cond_data)
return msngr(model) if model is not None else msngr
scale_log_joint = log_joint(scale, cond_data={"measurement": 9.5, "weight": 8.23})
print(scale_log_joint(8.5))
class LogJointMessenger2(poutine.messenger.Messenger):
def __init__(self, cond_data):
self.data = cond_data
def __call__(self, fn):
def _fn(*args, **kwargs):
with self:
fn(*args, **kwargs)
return self.logp.clone()
return _fn
def __enter__(self):
self.logp = torch.tensor(0.)
return super().__enter__()
def __exit__(self, exc_type, exc_value, traceback):
self.logp = torch.tensor(0.)
return super().__exit__(exc_type, exc_value, traceback)
def _pyro_sample(self, msg):
if msg["name"] in self.data:
msg["value"] = self.data[msg["name"]]
msg["done"] = True
def _pyro_post_sample(self, msg):
assert msg["done"] # the "done" flag asserts that no more modifications to value and fn will be performed.
self.logp = self.logp + (msg["scale"] * msg["fn"].log_prob(msg["value"])).sum()
with LogJointMessenger2(cond_data={"measurement": 9.5, "weight": 8.23}) as m:
scale(8.5)
print(m.logp)
def monte_carlo_elbo(model, guide, batch, *args, **kwargs):
# assuming batch is a dictionary, we use poutine.condition to fix values of observed variables
conditioned_model = poutine.condition(model, data=batch)
# we'll approximate the expectation in the ELBO with a single sample:
# first, we run the guide forward unmodified and record values and distributions
# at each sample site using poutine.trace
guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)
# we use poutine.replay to set the values of latent variables in the model
# to the values sampled above by our guide, and use poutine.trace
# to record the distributions that appear at each sample site in in the model
model_trace = poutine.trace(
poutine.replay(conditioned_model, trace=guide_trace)
).get_trace(*args, **kwargs)
elbo = 0.
for name, node in model_trace.nodes.items():
if node["type"] == "sample":
elbo = elbo + node["fn"].log_prob(node["value"]).sum()
if not node["is_observed"]:
elbo = elbo - guide_trace.nodes[name]["fn"].log_prob(node["value"]).sum()
return -elbo
def train(model, guide, data):
optimizer = pyro.optim.Adam({})
for batch in data:
# this poutine.trace will record all of the parameters that appear in the model and guide
# during the execution of monte_carlo_elbo
with poutine.trace() as param_capture:
# we use poutine.block here so that only parameters appear in the trace above
with poutine.block(hide_fn=lambda node: node["type"] != "param"):
loss = monte_carlo_elbo(model, guide, batch)
loss.backward()
params = set(node["value"].unconstrained()
for node in param_capture.trace.nodes.values())
optimizer.step(params)
pyro.infer.util.zero_grads(params)
def sequential_discrete_marginal(model, data, site_name="_RETURN"):
from six.moves import queue # queue data structures
q = queue.Queue() # Instantiate a first-in first-out queue
q.put(poutine.Trace()) # seed the queue with an empty trace
# as before, we fix the values of observed random variables with poutine.condition
# assuming data is a dictionary whose keys are names of sample sites in model
conditioned_model = poutine.condition(model, data=data)
# we wrap the conditioned model in a poutine.queue,
# which repeatedly pushes and pops partially completed executions from a Queue()
# to perform breadth-first enumeration over the set of values of all discrete sample sites in model
enum_model = poutine.queue(conditioned_model, queue=q)
# actually perform the enumeration by repeatedly tracing enum_model
# and accumulate samples and trace log-probabilities for postprocessing
samples, log_weights = [], []
while not q.empty():
trace = poutine.trace(enum_model).get_trace()
samples.append(trace.nodes[site_name]["value"])
log_weights.append(trace.log_prob_sum())
# we take the samples and log-joints and turn them into a histogram:
samples = torch.stack(samples, 0)
log_weights = torch.stack(log_weights, 0)
log_weights = log_weights - dist.util.logsumexp(log_weights, dim=0)
return dist.Empirical(samples, log_weights)
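# Sketch of how sequential_discrete_marginal could be used with a tiny discrete model
# (illustrative names; Bernoulli supports enumerate_support, which poutine.queue relies on):
def coin_model():
    z = pyro.sample("z", dist.Bernoulli(0.3))
    pyro.sample("x", dist.Normal(z, 1.))
    return z
# marginal = sequential_discrete_marginal(coin_model, data={"x": torch.tensor(0.8)}, site_name="z")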
class LazyValue:
def __init__(self, fn, *args, **kwargs):
self._expr = (fn, args, kwargs)
self._value = None
def __str__(self):
return "({} {})".format(str(self._expr[0]), " ".join(map(str, self._expr[1])))
def evaluate(self):
if self._value is None:
fn, args, kwargs = self._expr
fn = fn.evaluate() if isinstance(fn, LazyValue) else fn
args = tuple(arg.evaluate() if isinstance(arg, LazyValue) else arg
for arg in args)
kwargs = {k: v.evaluate() if isinstance(v, LazyValue) else v
for k, v in kwargs.items()}
self._value = fn(*args, **kwargs)
return self._value
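# Quick standalone check of LazyValue (a sketch; torch is already imported above):
lv = LazyValue(torch.add, torch.tensor(1.), torch.tensor(2.))
print(lv)             # prints the unevaluated expression
print(lv.evaluate())  # tensor(3.)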
class LazyMessenger(pyro.poutine.messenger.Messenger):
def _process_message(self, msg):
if msg["type"] in ("apply", "sample") and not msg["done"]:
msg["done"] = True
msg["value"] = LazyValue(msg["fn"], *msg["args"], **msg["kwargs"])
@effectful(type="apply")
def add(x, y):
return x + y
@effectful(type="apply")
def mul(x, y):
return x * y
@effectful(type="apply")
def sigmoid(x):
return torch.sigmoid(x)
@effectful(type="apply")
def normal(loc, scale):
return dist.Normal(loc, scale)
def biased_scale(guess):
weight = pyro.sample("weight", normal(guess, 1.))
tolerance = pyro.sample("tolerance", normal(0., 0.25))
return pyro.sample("measurement", normal(add(mul(weight, 0.8), 1.), sigmoid(tolerance)))
with LazyMessenger():
v = biased_scale(8.5)
print(v)
print(v.evaluate())
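# evaluate() memoizes its result (see LazyValue above), so repeated evaluation
# returns the same cached tensor (a sketch):
print(v.evaluate() is v.evaluate())  # True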
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: This model defines a joint probability distribution over "weight" and "measurement"
Step3: That snippet is short, but still somewhat opaque - poutine.condition, poutine.trace, and trace.log_prob_sum are all black boxes. Let's remove a layer of boilerplate from poutine.condition and poutine.trace and explicitly implement what trace.log_prob_sum is doing
Step4: This makes things a little more clear
Step5: A convenient bit of boilerplate that allows the use of LogJointMessenger as a context manager, decorator, or higher-order function is the following. Most of the existing effect handlers in pyro.poutine, including poutine.trace and poutine.condition which we used earlier, are Messengers wrapped this way in pyro.poutine.handlers.
Step6: The Messenger API in more detail
Step7: Inside the messages sent by Messengers
Step8: We use poutine.trace and poutine.block to record pyro.param calls for optimization
Step9: Example
Step10: (Note that sequential_discrete_marginal is very general, but is also quite slow. For high-performance parallel enumeration that applies to a less general class of models, see the enumeration tutorial.)
Step11: With LazyValue, implementing lazy evaluation as a Messenger compatible with other effect handlers is surprisingly easy. We just make each msg["value"] a LazyValue and introduce a new operation type "apply" for deterministic operations
Step12: Finally, just like torch.autograd overloads torch tensor operations to record an autograd graph, we need to wrap any operations we'd like to be lazy. We'll use pyro.poutine.runtime.effectful as a decorator to expose these operations to LazyMessenger. effectful constructs a message much like the one above and sends it up and down the effect handler stack, but allows us to set the type (in this case, to "apply" instead of "sample") so that these operations aren't mistaken for sample statements by other effect handlers like TraceMessenger
Step13: Applied to another model
|
4,741
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'giss-e2-1g', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
4,742
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib inline
X = [0,1,2,3,4]
Fx = [x**2 for x in X]
fig = plt.plot(X,Fx)
plt.show(fig)
fig,axes = plt.subplots(2,2)
F0 = [x**0 for x in X]
F1 = [x**1 for x in X]
F2 = [x**2 for x in X]
F3 = [x**3 for x in X]
axes[0,0].plot(X,F0)
axes[0,1].plot(X,F1)
axes[1,0].plot(X,F2)
axes[1,1].plot(X,F3)
plt.show(fig)
fig = plt.figure()
ax12 = fig.add_subplot(2,1,1) # This one fills
# the top half of the picture
ax3 = fig.add_subplot(2,2,3) # These ones fill half of the
ax4 = fig.add_subplot(2,2,4) # space left each.
ax12.plot(X,Fx)
ax3.plot(X,F0)
ax4.plot(X,F1)
plt.show(fig)
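# The three integers can also be packed into a single number, a common shorthand:
fig2 = plt.figure()
ax = fig2.add_subplot(221)  # equivalent to fig2.add_subplot(2, 2, 1)
ax.plot(X, F2)
plt.show(fig2)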
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Even though we can arrange the Axes however we want inside the figure,
Step2: Another useful way to create grids of plots is by creating a figure and then adding subplots to it with the add_subplot function. With add_subplot, you specify the grid structure you have in mind, and it will return an Axes with those dimensions. This function takes 3 parameters
|
4,743
|
<ASSISTANT_TASK:>
Python Code:
# This is a comment
# This is a code cell; we also execute it by pressing Shift + Enter
print(1 + 2)
a = 5
b = 10
print("a + b = %d" % (a + b))
print("a * b = %d" % (a * b))
%lsmagic
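# A couple of magics in action (illustrative):
%timeit sum(range(1000))
%who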
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the notebook we can execute cells with more than one line of code, in the style of MATLAB or Mathematica
Step2: In the menu bar we can find useful drop-down menus for performing simple notebook-editing tasks. The best way to get to know them is to experiment
|
4,744
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pickle
%matplotlib inline
def read_weather():
with open('data/weather.pkl', 'rb') as f:
return pickle.load(f)
# The file weather.pkl contains a list of dictionaries
Data = read_weather()
Data[0]
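# Quick sanity checks on the loaded data (illustrative):
print(len(Data))
print(sorted(Data[0].keys()))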
# Implement Q1 part 1 here:
# -------------------------
# tempr_dict = ...
# Run this cell to tests if you have completed part 1 correctly:
assert(all(key in tempr_dict for key in ['Berlin', 'Kiev', 'London', 'Moscow', 'Southampton'])),\
'keys of your weather dictionary should be Berlin, Kiev, London, Moscow, Southampton'
assert(len(tempr_dict['Moscow']) == 2), "Entries in your dictionary should be a tuple of two values"
# Part 2:
# Implement Q2 pt 1 here
# -----------------------
# weather_dict = ...
# Implement Q2 pt 2 here
# -----------------------
def plot_weather_lattitude(weather_dictionary, var_name):
# YOUR CODE HERE
return None
# Here are the variables that should be in your data
weather_vars = ['temp', 'temp_max', 'temp_min', 'pressure', 'humidity']
# Loop over the variable strings above and plot them using your
# plot_weather_lattitude function:
for var_name in weather_vars:
plot_weather_lattitude(weather_dict, var_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The purpose of the exercise is to manipulate and plot the current weather of a number of European cities. The data has been downloaded from OpenWeather, and has been loaded for you below using the given function read_weather.
Step2: Part 1
Step3: Optional extension
|
4,745
|
<ASSISTANT_TASK:>
Python Code:
import networkx as nx
import matplotlib.pyplot as plt

# Load the sociopatterns network data.
#G = cf.load_sociopatterns_network()
G = nx.read_gpickle('Synthetic Social Network.pkl')
# Let's find out the number of neighbors that individual #7 has.
len(G.neighbors(7))
G.nodes(data=True)
G.edges(data=True)
sorted([(n,G.neighbors(n)) for n in G.nodes()], key=lambda x: len(x[1]), reverse=True)
nx.degree_centrality(G)
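# The node with the highest degree centrality (a sketch):
max(nx.degree_centrality(G).items(), key=lambda x: x[1])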
# Possible Answers:
fig = plt.figure(0)
degree_centralities = [dc for n, dc in nx.degree_centrality(G).items()]
plt.hist(degree_centralities)
plt.title('Degree Centralities')
fig = plt.figure(1)
neighbors = [len(G.neighbors(n)) for n in G.nodes()]
plt.hist(neighbors)
plt.title('Number of Neighbors')
fig = plt.figure(2)
plt.scatter(degree_centralities, neighbors)
plt.xlabel('Degree Centralities')
plt.ylabel('Number of Neighbors')
from circos import CircosPlot
import numpy as np
nodes = G.nodes()
edges = G.edges()
edgeprops = dict(alpha=0.5) # set the alpha value to 0.1
nodecolor = plt.cm.viridis(np.arange(len(nodes)) / len(nodes)) # be sure to use viridis!
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
c = CircosPlot(nodes, edges, radius=10, ax=ax, fig=fig, edgeprops=edgeprops, nodecolor=nodecolor)
c.draw()
plt.savefig('images/sociopatterns.png', dpi=300)
def path_exists(node1, node2, G):
    """
    This function checks whether a path exists between two nodes (node1, node2) in graph G.

    Special thanks to @ghirlekar for suggesting that we keep track of the "visited nodes" to
    prevent infinite loops from happening.
    Reference: https://github.com/ericmjl/Network-Analysis-Made-Simple/issues/3
    """
visited_nodes = set()
queue = [node1]
# Fill in code below
for node in queue:
neighbors = G.neighbors(node)
if node2 in neighbors:
print('Path exists between nodes {0} and {1}'.format(node1, node2))
return True
else:
queue.remove(node)
visited_nodes.add(node)
queue.extend([n for n in neighbors if n not in visited_nodes])
if len(queue) == 0:
print('Path does not exist between nodes {0} and {1}'.format(node1, node2))
return False
# Test your answer below
def test_path_exists():
print(path_exists(18, 5, G))
print(path_exists(29, 26, G))
test_path_exists()
nx.has_path(G, 29, 26)
nx.shortest_path(G, 4, 14)
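# A few of the other built-in shortest-path routines (illustrative):
nx.shortest_path_length(G, 4, 14)        # just the number of hops
# list(nx.all_shortest_paths(G, 4, 14))  # every shortest path between the pair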
# Possible Answer:
def extract_path_edges(G, source, target):
# Check to make sure that a path does exists between source and target.
if nx.has_path(G, source, target):
shor = nx.shortest_path(G, source, target)
newG = G.subgraph(shor)
return newG
else:
raise Exception('Path does not exist between nodes {0} and {1}.'.format(source, target))
newG = extract_path_edges(G, 1, 14)
nx.draw(newG, with_labels=True)
def extract_neighbor_edges(G, node):
    # One possible answer (a sketch): a new graph containing only the edges incident to node.
    newG = nx.Graph([(node, neighbor) for neighbor in G.neighbors(node)])
    return newG
fig = plt.figure(0)
newG = extract_neighbor_edges(G, 19)
nx.draw(newG, with_labels=True)
def extract_neighbor_edges2(G, node):
    # A second possible approach (a sketch): filter G.edges() for edges touching node.
    newG = nx.Graph([(u, v) for u, v in G.edges() if node in (u, v)])
    return newG
fig = plt.figure(1)
newG = extract_neighbor_edges2(G, 19)
nx.draw(newG, with_labels=True)
# Possible answer to Question 1:
# All we need here is the length of the path.
def compute_transmission_time(G, source, target):
    """Following the hint above (a sketch): the length of the shortest path,
    i.e. one unit of transmission time per hop."""
    return nx.shortest_path_length(G, source, target)
compute_transmission_time(G, 14, 4)
# Possible answer to Question 2:
# We need to know the length of every single shortest path between every pair of nodes.
# If we don't put a source and target into the nx.shortest_path_length(G) function call, then
# we get a dictionary of dictionaries, where all source-->target-->lengths are shown.
from collections import Counter

lengths = []
times = []
# One plausible fill (a sketch), following the hint above: walk the dict-of-dicts
# of shortest-path lengths (NetworkX 1.x return type), assuming one unit of
# transmission time per hop, so times mirrors lengths.
for source, dests in nx.shortest_path_length(G).items():
    for dest, length in dests.items():
        if source != dest:
            lengths.append(length)
            times.append(length)
plt.figure(0)
plt.bar(Counter(lengths).keys(), Counter(lengths).values())
plt.figure(1)
plt.bar(Counter(times).keys(), Counter(times).values())
btws = nx.betweenness_centrality(G, normalized=False)
plt.bar(btws.keys(), btws.values())
# One plausible fill (a sketch): degree centrality on x, betweenness centrality on y, per node.
deg_cent = nx.degree_centrality(G)
plt.scatter([deg_cent[n] for n in G.nodes()], [btws[n] for n in G.nodes()])
plt.xlabel('degree')
plt.ylabel('betweenness')
plt.title('centrality scatterplot')
nx.draw(nx.barbell_graph(5, 1))
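# For intuition: betweenness centrality on the barbell graph (a sketch).
# The single "bridge" node should dominate.
nx.betweenness_centrality(nx.barbell_graph(5, 1))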
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hubs
Step2: Exercise
Step3: Approach 2
Step4: If you inspect the dictionary closely, you will find that node 51 is the one that has the highest degree centrality, just as we had measured by counting the number of neighbors.
Step5: Exercise
Step7: What can you deduce about the structure of the network, based on this visualization?
Step8: If you write an algorithm that runs breadth-first, the recursion pattern is likely to follow what we have done above. If you do a depth-first search (i.e. DFS), the recursion pattern is likely to look a bit different. Take it as a challenge exercise to figure out what a DFS looks like.
Step9: NetworkX also has other shortest path algorithms implemented.
Step10: Incidentally, the node list is in order as well.
Step11: Challenge Exercise (at home)
Step13: Challenge Exercises (at home)
Step14: Hubs Revisited
Step15: Exercise
Step16: Think about it...
|
4,746
|
<ASSISTANT_TASK:>
Python Code:
# `s` is assumed to be a surface-data handle opened earlier in the session.
import numpy as np
from skimage import measure

sl = s.GetSurfaceDataLayout(0)
print(sl)
arr = np.swapaxes(np.array(s.GetSurfaceData(0).GetDataShorts())[0,0,...],0,2)
print(arr.shape)
vx = (sl.mExtendMaxX-sl.mExtendMinX)/(sl.mSizeX-1)
vy = (sl.mExtendMaxY-sl.mExtendMinY)/(sl.mSizeY-1)
vz = (sl.mExtendMaxZ-sl.mExtendMinZ)/(sl.mSizeZ-1)
print(vx,vy,vz)
normals = np.swapaxes(np.array(s.GetSurfaceNormals(0).GetDataShorts())[0,0,...],0,2)
print(normals.shape)
verts, faces, normals, values = measure.marching_cubes_lewiner(arr, 0, spacing=(vz, vy, vx))
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_trisurf(verts[:, 2]+sl.mExtendMinX, verts[:,1]+sl.mExtendMinY, faces, verts[:, 0]+sl.mExtendMinZ,
lw=1,
antialiased=True)
plt.show()
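# The same mesh can feed quick measurements (a sketch):
print('approximate surface area:', measure.mesh_surface_area(verts, faces))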
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GetSurfaceData()
Step2: GetSurfaceNormals()
|
4,747
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import holoviews as hv
hv.extension('bokeh', 'matplotlib')
%opts Ellipse [xaxis=None yaxis=None] (color='red' line_width=2)
%opts Box [xaxis=None yaxis=None] (color='blue' line_width=2)
def annotations(angle):
radians = (angle / 180) * np.pi
return (hv.Box(0,0,4, orientation=np.pi/4)
* hv.Ellipse(0,0,(2,4), orientation=radians)
* hv.Text(0,0,'{0}º'.format(float(angle))))
hv.DynamicMap(annotations, kdims=['angle']).redim.range(angle=(0, 360)).redim.label(angle='angle (º)')
from holoviews import streams
from holoviews.streams import Stream
Angle = Stream.define('Angle', angle=0)
hv.help(Angle)
%%opts Box (color='green')
dmap = hv.DynamicMap(annotations, streams=[Angle(angle=45)])
dmap
dmap.event(angle=90)
# Exercise: Regenerate the DynamicMap, initializing the angle to 15 degrees
# Exercise: Use dmap.event to set the angle shown to 145 degrees.
# Exercise: Do not specify an initial angle so that the default value of 0 degrees is used.
# Exercise: Use the cell magic %%output backend='matplotlib' to try the above with matplotlib
# Exercise: Declare a DynamicMap using annotations2 and AngleAndSize
# Then use the event method to set the size to 1.5 and the angle to 30 degrees
def annotations2(angle, size):
radians = (angle / 180.) * np.pi
return (hv.Box(0,0,4, orientation=np.pi/4)
* hv.Ellipse(0,0,(size,size*2), orientation=radians)
* hv.Text(0,0,'{0}º'.format(float(angle))))
AngleAndSize = Stream.define('AngleAndSize', angle=0., size=1.)
%%opts Ellipse (color='orange')
dmap2 = hv.DynamicMap(annotations, streams=[Angle(angle=0)])
dmap2
dmap2.periodic(0.01, count=180, timeout=8, param_fn=lambda i: {'angle':i})
# Exercise: Experiment with different period values. How fast can things update?
# Exercise: Increase count so that the oval completes a full rotation.
# Exercise: Lower the timeout so the oval completes less than a quarter turn before stopping
%%opts HLine [xaxis=None yaxis=None]
pointer = streams.PointerXY(x=0, y=0)
def crosshair(x, y):
return hv.Ellipse(0,0,1) * hv.HLine(y) * hv.VLine(x)
hv.DynamicMap(crosshair, streams=[pointer])
# Exercise: Set the defaults so that the crosshair initializes at x=0.25, y=0.25
# Exercise: Copy the above example and adapt it to make a red point of size 10 follow your cursor (using hv.Points)
%%opts Scatter[width=900 height=400 tools=['xbox_select'] ] (cmap='RdBu' line_color='black' size=5 line_width=0.5)
%%opts Scatter [color_index='latitude' colorbar=True colorbar_position='bottom' colorbar_opts={'title': 'Latitude'}]
eclipses = pd.read_csv('../data/eclipses_21C.csv', parse_dates=['date'])
magnitudes = hv.Scatter(eclipses, kdims=['hour_local'], vdims=['magnitude','latitude'])
def selection_example(index):
text = '{0} eclipses'.format(len(index)) if index else ''
return magnitudes * hv.Text(2,1, text)
dmap3 = hv.DynamicMap(selection_example, streams=[streams.Selection1D()])
dmap3.redim.label(magnitude='Eclipse Magnitude', hour_local='Hour (local time)')
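# The stream attached to a DynamicMap can be inspected after interaction (a sketch):
sel = dmap3.streams[0]
# sel.index   # in a live session, the indices of the currently selected points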
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A simple DynamicMap
Step2: This example uses the concepts introduced in the exploring with containers section. As before, the argument angle is supplied by the position of the 'angle' slider.
Step3: Here Angle is capitalized as it is a subclass of Stream with a numeric angle parameter, which has a default value of zero. You can verify this using hv.help
Step4: Now we can declare a DynamicMap where instead of specifying kdims, we instantiate Angle with an angle of 45º and pass it to the streams parameter of the DynamicMap
Step5: As expected, we see our ellipse with an angle of 45º as specified via the angle parameter of our Angle instance. In itself, this wouldn't be very useful but given that we have a handle on our DynamicMap dmap, we can now use the event method to update the angle parameter value and update the plot
Step6: When running this cell, the visualization above will jump to the 90º position! If you have already run the cell, just change the value above and re-run, and you'll see the plot above update.
Step7: Periodic updates
Step8: If you re-execute the above cell, you should see the preceding plot update continuously until the count value is reached.
Step9: Linked streams
Step10: When hovering in the plot above, while it is backed by a live Python process, the crosshair will track the cursor.
Step11: You can view other similar examples of custom interactivity in our reference gallery and learn more about linked streams in the user guide. Here is a quick summary of some of the more useful linked stream classes HoloViews currently offers and the parameters they supply
|
4,748
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!KAGGLE_CONFIG_DIR=`pwd` kaggle datasets download joshmcadams/oranges-vs-grapefruit
!ls
!unzip -o oranges-vs-grapefruit.zip
!ls
import pandas as pd
citrus_df = pd.read_csv('citrus.csv', header=0)
citrus_df.sample(10, random_state=2020)
# Your Solution Goes Here
citrus_df.describe()
# Your Solution Goes Here
import altair as alt
citrus_df_sample = citrus_df.sample(n=5000, random_state=2020)
alt.Chart(citrus_df_sample, width=400).mark_boxplot().encode(
x='name',
y='diameter'
)
alt.Chart(citrus_df_sample, width=400).mark_boxplot().encode(
x='name',
y='diameter'
) | alt.Chart(citrus_df_sample, width=400).mark_boxplot().encode(
x='name',
y='weight'
)
# Your Solution Goes Here
alt.Chart(citrus_df_sample).mark_circle().encode(
x='diameter',
y='weight',
color='name'
)
alt.Chart(citrus_df_sample, width=400).mark_boxplot().encode(
x='name',
y='red'
) | alt.Chart(citrus_df_sample, width=400).mark_boxplot().encode(
x='name',
y='green'
) | alt.Chart(citrus_df_sample, width=400).mark_boxplot().encode(
x='name',
y='blue'
)
citrus_df.groupby('name')['name'].count()
from PIL import Image
from matplotlib.pyplot import imshow
import numpy as np
height, width = 50, 100
img = Image.new('RGB', (width, height), color=(255, 255, 255))
pixels = img.load()
row_i, col_i = 0, 0
for _, fruit in citrus_df[citrus_df['name'] == 'orange'].iterrows():
pixels[col_i, row_i] = (fruit['red'], fruit['green'], fruit['blue'])
col_i += 1
if col_i >= width:
col_i = 0
row_i += 1
imshow(img)
# Your Solution Goes Here
citrus_df.columns
# Your Solution Goes Here
citrus_df.groupby('is_orange')['is_orange'].count()
citrus_df[citrus_df['is_orange']]['name'].unique()
target_column = 'is_orange'
feature_columns = ['diameter', 'weight', 'red', 'green', 'blue']
target_column, feature_columns
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
citrus_df[feature_columns],
citrus_df[target_column],
test_size=0.2,
random_state=180,
shuffle=True)
y_train.groupby(y_train).count()
y_test.groupby(y_test).count()
# Your Code Goes Here
X_train.shape, y_train.shape
X_test.shape, y_test.shape
y_train.describe()
y_test.describe()
y_test.groupby(by=y_test).count()
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=2020)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print('Accuracy: ', round(accuracy_score(predictions, y_test), 3))
print('Precision: ', round(precision_score(predictions, y_test), 3))
print('Recall: ', round(recall_score(predictions, y_test), 3))
print('F1: ', round(f1_score(predictions, y_test), 3))
from sklearn.metrics import confusion_matrix
tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()
print(f'True Positive: {tp}\nTrue Negative: {tn}\nFalse Positive: {fp}\nFalse Negative: {fn}')
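# Sanity check: recompute precision and recall straight from the confusion
# matrix counts above; they should match the sklearn metrics printed earlier.
print(f'Manual precision: {tp / (tp + fp):.3f}')
print(f'Manual recall: {tp / (tp + fn):.3f}')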
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt  # needed for the plots below
scores = model.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, scores, pos_label=True)
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.plot(fpr, tpr)
plt.show()
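# roc_auc_score was imported above but never called; it condenses the ROC
# curve into a single summary number.
print('ROC AUC:', round(roc_auc_score(y_test, scores), 3))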
from sklearn.metrics import precision_recall_curve
scores = model.decision_function(X_test)
precision, recall, _ = precision_recall_curve(y_test, scores)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.plot(recall, precision)
plt.show()
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split, GridSearchCV
citrus_df = pd.read_csv('citrus.csv', header=0)
citrus_df['is_orange'] = citrus_df['name'].apply(lambda name: name == 'orange')
target_column = 'is_orange'
feature_columns = ['diameter', 'weight', 'red', 'green', 'blue']
X_train, X_validate, y_train, y_validate = train_test_split(
citrus_df[feature_columns],
citrus_df[target_column],
test_size=0.2,
random_state=42,
shuffle=True,
stratify=citrus_df[target_column])
model = LogisticRegression(
random_state=2020,
)
search = GridSearchCV(model, {
# Your Solution Goes Here
})
search.fit(X_train, y_train)
print(search.best_estimator_)
# Your Solution Goes Here
# Your code goes here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Binary Classification
Step2: There should now be an oranges-vs-grapefruit.zip file in the virtual machine for this lab. Let's unzip it so we can access the data.
Step3: There is now a citrus.csv file in our virtual machine. Let's start digging into the data next.
Step4: It looks like we have a mixed bag of fruit containing oranges and grapefruit, just as expected.
Step5: Interpreting Our Histogram
Step6: Since every count is 10,000, we don't seem to have missing values.
Step7: Visualizing With Box Plots
Step8: Next we will use the mark_boxplot method of the Chart class to create our boxplot.
Step9: What insights can we glean from this graphic?
Step10: Correlation
Step11: Understanding the Correlation
Step12: We can see that oranges and grapefruit have very similar rates of weight gain as their diameter increases. This shouldn't be too surprising since they are very similar fruits.
Step13: There doesn't seem to be a lot of value there, at least examining each element of color separately. There is quite a bit of overlap between each color element, with grapefruit displaying a little less red and green typically.
Step14: As expected, we have 5000 samples each. We can create a 100x50 image for each fruit type and visualize the data.
Step15: That looks like a pretty reasonable orange color. What about the grapefruit?
Step16: Data Analysis Summary
Step17: We'll use 'diameter', 'weight', 'red', 'green', and 'blue' as feature columns. Using 'name' for our target column is tempting, but remember that it contains fruit names for values, and for this exercise, we are only interested in determining if a piece of fruit is an orange or not an orange. Let's create a new column called 'is_orange' that contains the value True if the datum is an orange and False otherwise.
Step18: Examining Our New Target Column
Step19: There should be 5,000 True values and 5,000 False values.
Step20: We should only see a single value in the unique list ('orange') since all rows with 'is_orange' set to True should have a 'name' of 'orange'.
Step21: We need to split the data into a training and testing set. In this case we'll split 20% of the data off for testing and train on the other 80%. We can use scikit-learn's train_test_split function to do this. It is also a really good idea to shuffle our data, and train_test_split allows us to do this too.
Step22: Hmm. It looks like our training set has become a little uneven. Ideally, we would maintain the same ratio of oranges to non-oranges in our training and testing groups as the ratio in the whole set (50/50). But after splitting the data, we've ended up with a training set that skews towards non-oranges, and a test set that skews the opposite way, towards oranges.
Step23: Luckily, there's a solution for this problem
Step24: Examining The Split Data
Step25: And 20% in testing.
Step26: Let's look at the training data and see if it stratified correctly.
Step27: From this output we can see that there are 8,000 pieces of data with 2 unique values. The top value is True, and it occurs 4,000 times. That would leave us with 4,000 other values that are False.
Step28: Another alternative is to use groupby on the series. Notice that the by argument contains the series once again and not a column name.
Step29: Create and Train the Model
Step30: Measure Model Performance
Step31: Now we can use metrics functions from scikit-learn to see how well our model performed. We'll check the accuracy, precision, recall, and F1 scores.
Step32: Numbers for most of the metrics are above 90%, which is better than Cindy was sorting!
Step33: We have just under 100 falsely identified fruit. There are about twice as many false negatives as there are false positives. Let's take a few minutes to think about what this confusion matrix means.
Step34: We can see that there is a steep increase in false positives as the true positive rate crosses into the 90% range.
Step35: This shows the balance between precision and recall as the model adjusts classification thresholds.
Step36: Question 2
Step37: Question 3
|
4,749
|
<ASSISTANT_TASK:>
Python Code:
### Import libaries
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
import time
import background as bg # import background.py
from IPython.display import HTML
%matplotlib inline
### Chessboard Corners
# Prepare object points
nx = 9
ny = 6
objp = np.zeros((nx*ny,3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Object points and Image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
chessboard_corners = []
images = glob.glob('camera_cal/calibration*.jpg')
for fname in images:
image = mpimg.imread(fname)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Find chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
chessboard_corners.append(cv2.drawChessboardCorners(image, (nx,ny), corners, ret))
else:
chessboard_corners.append(image)
# Create subplots in figure
fig = plt.figure(figsize=(20, 15))
for i in range(1,len(chessboard_corners)+1):
fig.add_subplot(5,4,i)
fig.tight_layout()
plt.imshow(chessboard_corners[i-1])
plt.axis('off')
plt.show()
# Camera Calibration
img = mpimg.imread('camera_cal/calibration3.jpg')
img_size = (img.shape[1], img.shape[0])
# Calibrate Camera
ret, camera_mtx, camera_dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
# Undistort Test Image
undistort_image = cv2.undistort(img, camera_mtx, camera_dist, None, camera_mtx)
bg.disp_img(img, undistort_image)
# Save and load camera calibration pickle data
bg.save()
bg.load()
image = mpimg.imread('test_images/straight_lines1.jpg')
undistorted, src_corners, dst_corners = bg.undistort_img(image)
bg.disp_img(image, undistorted)
threshold_image = bg.color_threshold(undistorted)
bg.disp_img(undistorted, threshold_image)
warped_original, M_Inv = bg.perspective_transform(image)
bg.disp_img(image, warped_original)
warped_image, M_Inv = bg.perspective_transform(threshold_image)
bg.disp_img(threshold_image, warped_image)
left_lane_idx, right_lane_idx = bg.lane_coordinates(warped_image)
image_lane_point = bg.draw_lane_points(warped_image,left_lane_idx, right_lane_idx)
bg.disp_img(warped_image, image_lane_point)
img_size = [image.shape[1], image.shape[0]]
left_lane_y, right_lane_y, left_fit_x, right_fit_x, left_fit, right_fit = bg.identify_lane(left_lane_idx,
right_lane_idx, img_size)
out_img = bg.draw_curved_line(image_lane_point, right_fit)
out_img_2 = bg.draw_curved_line(out_img,left_fit )
bg.disp_img(warped_image,out_img)
img = mpimg.imread('test_images/test1.jpg')
final_img = bg.pipeline(img)
bg.disp_img(img, final_img)
img = mpimg.imread('test_images/test2.jpg')
final_img = bg.pipeline(img)
bg.disp_img(img, final_img)
img = mpimg.imread('test_images/test3.jpg')
final_img = bg.pipeline(img)
bg.disp_img(img, final_img)
bg.make_video(video_path = "input_videos/project_video.mp4", file_out = "output_videos/project_output.mp4")
x = [[1,2,3],
[4,5,6],
[7,8,9]]
x
x = np.array(x)
x
nonzero= x.nonzero()
nonzero
nonzeroy=np.array(nonzero[0])
nonzeroy
nonzerox=np.array(nonzero[1])
nonzerox
good=((nonzeroy<1)&(nonzerox>0))
good
good=((nonzeroy<1)&(nonzerox>0)).nonzero()
good
good=((nonzeroy<1)&(nonzerox>0)).nonzero()[0]
good
a=[]
a.append(good)
a
a.append(good)
a
a[1][0]
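# Note: this boolean-mask + nonzero() indexing pattern is the same one a
# sliding-window lane search (such as the one in background.py) typically uses
# to collect the pixel indices that fall inside each search window.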
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Camera Calibration
Step2: Plot all images; only the ones with the correct grid size have corners drawn on them.
Step3: apply the camera calibration a selected image.
Step4: Pipeline (single images)
Step5: 2. Describe how (and identify where in your code) you used color transforms, gradients or other methods to create a thresholded binary image. Provide an example of a binary image result.
Step6: 3. Describe how (and identify where in your code) you performed a perspective transform and provide an example of a transformed image.
Step7: 4. Describe how (and identify where in your code) you identified lane-line pixels and fit their positions with a polynomial?
Step8: 5. Describe how (and identify where in your code) you calculated the radius of curvature of the lane and the position of the vehicle with respect to center.
Step9: Creating videos
Step10: Discussion
|
4,750
|
<ASSISTANT_TASK:>
Python Code:
# A bit of setup
import numpy as np
import matplotlib.pyplot as plt
from time import time
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the TinyImageNet-100-B dataset
from cs231n.data_utils import load_tiny_imagenet, load_models
tiny_imagenet_b = 'cs231n/datasets/tiny-imagenet-100-B'
class_names, X_train, y_train, X_val, y_val, X_test, y_test = load_tiny_imagenet(tiny_imagenet_b)
# Zero-mean the data
mean_img = np.mean(X_train, axis=0)
X_train -= mean_img
X_val -= mean_img
X_test -= mean_img
# We will use a subset of the TinyImageNet-B training data
mask = np.random.choice(X_train.shape[0], size=5000, replace=False)
X_train = X_train[mask]
y_train = y_train[mask]
# Load a pretrained model; it is a five layer convnet.
models_dir = 'cs231n/datasets/tiny-100-A-pretrained'
model = load_models(models_dir)['model1']
for names in class_names:
print ' '.join('"%s"' % name for name in names)
# Visualize some examples of the training data
classes_to_show = 7
examples_per_class = 5
class_idxs = np.random.choice(len(class_names), size=classes_to_show, replace=False)
for i, class_idx in enumerate(class_idxs):
train_idxs, = np.nonzero(y_train == class_idx)
train_idxs = np.random.choice(train_idxs, size=examples_per_class, replace=False)
for j, train_idx in enumerate(train_idxs):
img = X_train[train_idx] + mean_img
img = img.transpose(1, 2, 0).astype('uint8')
plt.subplot(examples_per_class, classes_to_show, 1 + i + classes_to_show * j)
if j == 0:
plt.title(class_names[class_idx][0])
plt.imshow(img)
plt.gca().axis('off')
plt.show()
from cs231n.classifiers.convnet import five_layer_convnet
# These should store extracted features for the training and validation sets
# respectively.
#
# More concretely, X_train_feats should be an array of shape
# (X_train.shape[0], 512) where X_train_feats[i] is the 512-dimensional
# feature vector extracted from X_train[i] using model.
#
# Similarly X_val_feats should have shape (X_val.shape[0], 512) and
# X_val_feats[i] should be the 512-dimensional feature vector extracted from
# X_val[i] using model.
X_train_feats = None
X_val_feats = None
# Use our pre-trained model to extract features on the subsampled training set
# and the validation set.
################################################################################
# TODO: Use the pretrained model to extract features for the training and #
# validation sets for TinyImageNet-100-B. #
# #
# HINT: Similar to computing probabilities in the previous exercise, you #
# should split the training and validation sets into small batches to avoid #
# using absurd amounts of memory. #
################################################################################
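# Note: the arrays below fit in memory in a single pass here; with less memory,
# extract features in minibatches as the hint above suggests.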
X_train_feats = five_layer_convnet(X_train, model, y=None, reg=0.0,
extract_features=True)
X_val_feats = five_layer_convnet(X_val, model, y=None, reg=0.0,
extract_features=True)
pass
################################################################################
# END OF YOUR CODE #
################################################################################
from cs231n.classifiers.k_nearest_neighbor import KNearestNeighbor
# Predicted labels for X_val using a k-nearest-neighbor classifier trained on
# the features extracted from X_train. knn_y_val_pred[i] = c indicates that
# the kNN classifier predicts that X_val[i] has label c.
knn_y_val_pred = None
################################################################################
# TODO: Use a k-nearest neighbor classifier to compute knn_y_val_pred. #
# You may need to experiment with k to get the best performance. #
################################################################################
knn = KNearestNeighbor()
knn.train(X_train_feats, y_train)
knn_y_val_pred = knn.predict(X_val_feats, k=25)
pass
################################################################################
# END OF YOUR CODE #
################################################################################
print 'Validation set accuracy: %f' % np.mean(knn_y_val_pred == y_val)
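# Optional extension: sweep a few values of k to see how sensitive validation
# accuracy is to the neighborhood size (uses the same predict API as above).
for k in [1, 5, 10, 25, 50]:
    pred_k = knn.predict(X_val_feats, k=k)
    print 'k=%d: validation accuracy %.3f' % (k, np.mean(pred_k == y_val))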
dists = knn.compute_distances_no_loops(X_val_feats)
num_imgs = 5
neighbors_to_show = 6
query_idxs = np.random.randint(X_val.shape[0], size=num_imgs)
next_subplot = 1
first_row = True
for query_idx in query_idxs:
query_img = X_val[query_idx] + mean_img
query_img = query_img.transpose(1, 2, 0).astype('uint8')
plt.subplot(num_imgs, neighbors_to_show + 1, next_subplot)
plt.imshow(query_img)
plt.gca().axis('off')
if first_row:
plt.title('query')
next_subplot += 1
o = np.argsort(dists[query_idx])
for i in xrange(neighbors_to_show):
img = X_train[o[i]] + mean_img
img = img.transpose(1, 2, 0).astype('uint8')
plt.subplot(num_imgs, neighbors_to_show + 1, next_subplot)
plt.imshow(img)
plt.gca().axis('off')
if first_row:
plt.title('neighbor %d' % (i + 1))
next_subplot += 1
first_row = False
from cs231n.classifiers.linear_classifier import Softmax
softmax_y_train_pred = None
softmax_y_val_pred = None
################################################################################
# TODO: Train a softmax classifier to predict a TinyImageNet-100-B class from #
# features extracted from our pretrained ConvNet. Use this classifier to make #
# predictions for the TinyImageNet-100-B training and validation sets, and #
# store them in softmax_y_train_pred and softmax_y_val_pred. #
# #
# You may need to experiment with number of iterations, regularization, and #
# learning rate in order to get good performance. The softmax classifier #
# should achieve a higher validation accuracy than the kNN classifier. #
################################################################################
softmax = Softmax()
# NOTE: the input X of the softmax classifier is an array of shape D x N
softmax.train(X_train_feats.T, y_train,
learning_rate=1e-2, reg=1e-4, num_iters=1000)
y_train_pred = softmax.predict(X_train_feats.T)
y_val_pred = softmax.predict(X_val_feats.T)
# store the predictions under the names the TODO above asks for
softmax_y_train_pred, softmax_y_val_pred = y_train_pred, y_val_pred
pass
################################################################################
# END OF YOUR CODE #
################################################################################
print y_val_pred.shape, y_train_pred.shape
train_acc = np.mean(y_train == y_train_pred)
val_acc = np.mean(y_val_pred == y_val)
print train_acc, val_acc
from cs231n.classifier_trainer import ClassifierTrainer
# Make a copy of the pretrained model
model_copy = {k: v.copy() for k, v in model.iteritems()}
# Initialize the weights of the last affine layer using the trained weights from
# the softmax classifier above
model_copy['W5'] = softmax.W.T.copy().astype(model_copy['W5'].dtype)
model_copy['b5'] = np.zeros_like(model_copy['b5'])
# Fine-tune the model. You will need to adjust the training parameters to get good results.
trainer = ClassifierTrainer()
learning_rate = 1e-4
reg = 1e-1
dropout = 0.5
num_epochs = 2
finetuned_model = trainer.train(X_train, y_train, X_val, y_val,
model_copy, five_layer_convnet,
learning_rate=learning_rate, reg=reg, update='rmsprop',
dropout=dropout, num_epochs=num_epochs, verbose=True)[0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data and model
Step2: TinyImageNet-100-B classes
Step3: Visualize Examples
Step4: Extract features
Step5: kNN with ConvNet features
Step6: Visualize neighbors
Step7: Softmax on ConvNet features
Step8: Fine-tuning
|
4,751
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import pandas as pd
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
import warnings
from itertools import product
def invboxcox(y,lmbda):
if lmbda == 0:
return(np.exp(y))
else:
return(np.exp(np.log(lmbda*y+1)/lmbda))
wine = pd.read_csv('monthly-australian-wine-sales.csv',',', index_col=['month'], parse_dates=['month'], dayfirst=True)
wine.sales = wine.sales * 1000
plt.figure(figsize(15,7))
wine.sales.plot()
plt.ylabel('Wine sales')
pylab.show()
plt.figure(figsize(15,10))
sm.tsa.seasonal_decompose(wine.sales).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(wine.sales)[1])
wine['sales_box'], lmbda = stats.boxcox(wine.sales)
plt.figure(figsize(15,7))
wine.sales_box.plot()
plt.ylabel(u'Transformed wine sales')
print("Оптимальный параметр преобразования Бокса-Кокса: %f" % lmbda)
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(wine.sales_box)[1])
wine['sales_box_diff'] = wine.sales_box - wine.sales_box.shift(12)
plt.figure(figsize(15,10))
sm.tsa.seasonal_decompose(wine.sales_box_diff[12:]).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(wine.sales_box_diff[12:])[1])
wine['sales_box_diff2'] = wine.sales_box_diff - wine.sales_box_diff.shift(1)
plt.figure(figsize(15,10))
sm.tsa.seasonal_decompose(wine.sales_box_diff2[13:]).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(wine.sales_box_diff2[13:])[1])
plt.figure(figsize(15,8))
ax = plt.subplot(211)
sm.graphics.tsa.plot_acf(wine.sales_box_diff2[13:].values.squeeze(), lags=48, ax=ax)
pylab.show()
ax = plt.subplot(212)
sm.graphics.tsa.plot_pacf(wine.sales_box_diff2[13:].values.squeeze(), lags=48, ax=ax)
pylab.show()
ps = range(0, 5)
d=1
qs = range(0, 3)
Ps = range(0, 2)
D=1
Qs = range(0, 2)
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
len(parameters_list)
%%time
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
    # try/except is needed because the model fails to fit for some parameter sets
try:
model=sm.tsa.statespace.SARIMAX(wine.sales_box, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12)).fit(disp=-1)
    # report the parameters the model fails to fit on and move to the next set
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
    # keep the best model, its AIC, and its parameters
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
warnings.filterwarnings('default')
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
print(best_model.summary())
plt.figure(figsize(15,8))
plt.subplot(211)
best_model.resid[13:].plot()
plt.ylabel(u'Residuals')
ax = plt.subplot(212)
sm.graphics.tsa.plot_acf(best_model.resid[13:].values.squeeze(), lags=48, ax=ax)
print("Критерий Стьюдента: p=%f" % stats.ttest_1samp(best_model.resid[13:], 0)[1])
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(best_model.resid[13:])[1])
wine['model'] = invboxcox(best_model.fittedvalues, lmbda)
plt.figure(figsize(15,7))
wine.sales.plot()
wine.model[13:].plot(color='r')
plt.ylabel('Wine sales')
pylab.show()
wine2 = wine[['sales']]
import datetime
from dateutil.relativedelta import relativedelta  # neither is pulled in by %pylab
date_list = [datetime.datetime.strptime("1994-09-01", "%Y-%m-%d") + relativedelta(months=x) for x in range(0,36)]
future = pd.DataFrame(index=date_list, columns= wine2.columns)
wine2 = pd.concat([wine2, future])
wine2['forecast'] = invboxcox(best_model.predict(start=176, end=211), lmbda)
plt.figure(figsize(15,7))
wine2.sales.plot()
wine2.forecast.plot(color='r')
plt.ylabel('Wine sales')
pylab.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Stationarity check and STL decomposition of the series
Step2: Variance stabilization
Step3: Stationarity
Step4: The Dickey-Fuller test does not reject the non-stationarity hypothesis, and the trend could not be removed completely. Let's also add ordinary differencing
Step5: The non-stationarity hypothesis is rejected, and visually the series looks better: the trend is gone.
Step6: Initial approximations
Step7: If the previous cell raises an error, make sure you have updated statsmodels to at least version 0.8.0rc1.
Step8: The best model
Step9: Its residuals
Step10: The residuals are unbiased (confirmed by Student's t-test), stationary (confirmed by the Dickey-Fuller test and visually), and uncorrelated (confirmed by the Ljung-Box test and the correlogram).
Step11: Forecast
|
4,752
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random valued following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 801
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
batch_size = 100
num_hiddens = 200
graph = tf.Graph()
with graph.as_default():
#input
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size,image_size*image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size,num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
    tf_valid_labels = tf.constant(valid_labels) # defined but not actually used below
    tf_test_labels = tf.constant(test_labels) # defined but not actually used below
#variables
weights1 = tf.Variable(tf.truncated_normal([image_size*image_size,num_hiddens]))
biases1 = tf.Variable(tf.zeros([num_hiddens]))
weights2 = tf.Variable(tf.truncated_normal([num_hiddens, num_labels]))
biases2 = tf.Variable(tf.zeros([num_labels]))
#training computation
hiddens1_input = tf.matmul(tf_train_dataset,weights1)+biases1
hiddens1_output = tf.nn.relu(hiddens1_input)
logits = tf.matmul(hiddens1_output,weights2)+biases2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
#optimizer
optimizer = tf.train.GradientDescentOptimizer(0.3).minimize(loss)
#predictions
tf_train_prediction = tf.nn.softmax(logits)
tf_valid_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset,weights1)+biases1),weights2)+biases2)
tf_test_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset,weights1)+biases1),weights2)+biases2)
# training
num_steps = 4000
with tf.Session(graph=graph) as sess:
# initilze variables
init_graph = tf.initialize_all_variables()
sess.run(init_graph)
print("Initialized!")
#training iterations
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = sess.run([optimizer, loss, tf_train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(tf_valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(tf_test_prediction.eval(), test_labels))
print("----------------------------------------")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First reload the data we generated in 1_notmnist.ipynb.
Step2: Reformat into a shape that's more adapted to the models we're going to train
Step3: We're first going to train a multinomial logistic regression using simple gradient descent.
Step4: Let's run this computation and iterate
Step5: Let's now switch to stochastic gradient descent training instead, which is much faster.
Step6: Let's run it
Step7: Problem
|
4,753
|
<ASSISTANT_TASK:>
Python Code:
import os
os.chdir(os.path.expanduser('~/Codes/DL - Topic Modelling'))  # expanduser resolves '~', which os.chdir alone does not
from __future__ import print_function, division
import sys
import timeit
from six.moves import cPickle as pickle
import numpy as np
import pandas as pd
import theano
import theano.tensor as T
from lib.deeplearning import deepbeliefnet
# loading the data and transforming it into theano compatible variables
dat_x = np.genfromtxt('data/dtm_20news.csv', dtype='float32', delimiter=',', skip_header = 1)
dat_y = dat_x[:,0]
dat_x = dat_x[:,1:]
vocab = np.genfromtxt('data/dtm_20news.csv', dtype=str, delimiter=',', max_rows = 1)[1:]
x = theano.shared(dat_x)
y = T.cast(dat_y, dtype='int32')
model = deepbeliefnet(architecture = [2756, 500, 500, 128])
model.pretrain(input = x, pretraining_epochs = 10, output_path = 'params/to_delete')
import matplotlib.pyplot as plt
%matplotlib inline
plt_dat = np.genfromtxt('params_2000/dbn_params_pretrain/lproxy_layer_2.csv', delimiter=',', names = True)[:20]
plt.plot(plt_dat)
plt.show()
model = deepbeliefnet(architecture = [2756, 500, 500, 128], opt_epochs = [900,5,10],
predefined_weights = 'params/dbn_params')
output = model.score(input = x)
colnames = ['bit'] * 128
colnames = [colnames[i] + str(i) for i in range(128)]
colnames.insert(0,'_label_')
pd.DataFrame(data = np.c_[dat_y, output],
columns = colnames). \
to_csv( 'data/dbn_features.csv', index = False)
model = deepbeliefnet(architecture = [2756, 500, 500, 128], opt_epochs = [900,5,10], n_outs = 20, predefined_weights = 'params/dbn_params')
#model.train(x=x, y=y,batch_size = 70, training_epochs = 10, output_path = 'params/to_delete')
model.train(x=x, y=y, training_epochs = 10000, learning_rate = (1/70)/2, batch_size = 120,
drop_out = [0.2, .5, .5, .5], output_path = 'params/to_delete')
model = deepbeliefnet(architecture = [2756, 500, 500, 128], n_outs = 20, predefined_weights = 'params/to_delete/trained_dbn.pkl')
sum([1 for i, j in zip(model.predict(x), dat_y) if i == j])/len(dat_y)
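# Equivalent accuracy check via numpy, assuming predict returns an array of
# class labels (as the zip above implies).
print(np.mean(np.asarray(model.predict(x)) == dat_y))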
model.predict(x,prob=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ----------------------------------------------------------------------------------------------------------------------------------------------------------
Step2: Visualizing the changes in likelihood for each iteration
Step3: ----------------------------------------------------------------------------------------------------------------------------------------------------------
Step4: ----------------------------------------------------------------------------------------------------------------------------------------------------------
Step5: Loading the trained MLP
Step6: Calculating the accuracy of the trained model on the full dataset
Step7: Demonstrating how the predict function can be used to calculated the classes
|
4,754
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pysra
%matplotlib inline
# Increased figure sizes
plt.rcParams["figure.dpi"] = 120
m = pysra.motion.SourceTheoryRvtMotion(6.0, 30, "wna")
m.calc_fourier_amps()
fig, ax = plt.subplots()
ax.plot(m.freqs, m.fourier_amps)
ax.set(
xlabel="Frequency (Hz)", xscale="log", ylabel="Fourier Ampl. (g-s)", yscale="log"
)
fig.tight_layout();
profile = pysra.site.Profile(
[
pysra.site.Layer(pysra.site.SoilType("Soil", 18.0, None, 0.05), 30, 400),
pysra.site.Layer(pysra.site.SoilType("Rock", 24.0, None, 0.01), 0, 1200),
]
)
calc = pysra.propagation.LinearElasticCalculator()
freqs = np.logspace(-1, 2, num=500)
outputs = pysra.output.OutputCollection(
[
pysra.output.ResponseSpectrumOutput(
# Frequency
freqs,
# Location of the output
pysra.output.OutputLocation("outcrop", index=0),
# Damping
0.05,
),
pysra.output.ResponseSpectrumRatioOutput(
# Frequency
freqs,
# Location in (denominator),
pysra.output.OutputLocation("outcrop", index=-1),
# Location out (numerator)
pysra.output.OutputLocation("outcrop", index=0),
# Damping
0.05,
),
]
)
calc(m, profile, profile.location("outcrop", index=-1))
outputs(calc)
for o in outputs:
o.plot(style="indiv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a point source theory RVT motion
Step2: Create site profile
Step3: Create the site response calculator
Step4: Specify the output
Step5: Perform the calculation
Step6: Calculate all of the outputs from the calculation object.
Step7: Plot the outputs
|
4,755
|
<ASSISTANT_TASK:>
Python Code:
# Meme().display_meme_help()
from eden.util import configure_logging
import logging
configure_logging(logging.getLogger(),verbosity=2)
from utilities import Weblogo
wl = Weblogo(color_scheme='classic')
meme1 = Meme(alphabet="dna", # {ACGT}
gap_in_alphabet=False,
mod="anr", # Any number of repititions
output_dir="meme_anr",
nmotifs=3, # Number of motives to be found
weblogo_obj = wl
)
meme1.fit(fasta_file="seq18.fa")
predictions = meme1.predict(input_seqs=test, return_list=True)
for p in predictions: print p
predictions = meme1.predict(input_seqs="seq9.fa", return_list=False)
for p in predictions: print p
match = meme1.transform(input_seqs=test, return_match=True)
for m in match: print m
match = meme1.transform(input_seqs=test, return_match=False)
for m in match: print m
print meme1.e_values
meme2 = Meme(alphabet="dna", mod="anr", nmotifs=3)
predictions = meme2.fit_predict(fasta_file="seq18.fa", return_list=True)
for p in predictions: print p
matches = meme2.fit_transform(fasta_file="seq18.fa", return_match=True)
for m in matches: print m
#printing motives as lists
for motif in meme1.motives_list:
for m in motif:
print m
print
meme1.display_logo(do_alignment=False)
meme1.display_logo(motif_num=1)
meme1.align_motives() #MSA with Muscle
motives1=meme1.aligned_motives_list
for m in motives1:
for i in m:
print i
print
meme1.display_logo(do_alignment=True)
meme1.display()
meme1.matrix()
meme1.display(motif_num=3)
test_seq = 'GGAGAAAATACCGC' * 10
seq_score = meme1.score(motif_num=2, seq=test_seq)
print seq_score
meme2 = Meme(alphabet="dna", scoring_criteria="hmm", k=1, threshold=1.0,mod="anr", nmotifs=3, minw=7, maxw=9)
matches = meme2.fit_transform(fasta_file="seq9.fa", return_match=True)
for m in matches: print m
%%time
# Markov Model score
mm_score = meme2.score(motif_num=2, seq="ACGT"*10)
print mm_score
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3>E-value of each motif</h3>
Step2: <h2>fit_predict() and fit_transform() example</h2>
Step3: <h3>Print motives as lists</h3>
Step4: <h3>Display Sequence logo of un-aligned motives</h3>
Step5: <h3>Display Logo of specified motif</h3>
Step6: <h3>Multiple Sequence Alignment of motives with Muscle</h3>
Step7: <h3>Display sequence logo of aligned motives</h3>
Step8: <h3>Position Weight Matrices for motifs</h3>
Step9: <h4>Display PWM of single motif</h4>
Step10: <h4>Scoring a sequence w.r.t a motif</h4>
Step11: <h3> Transform with HMM as scoring criteria</h3>
|
4,756
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from clustergrammer_widget import *
net = Network(clustergrammer_widget)
net.load_file('rc_two_cats.txt')
net.cluster()
net.widget()
df_genes = net.widget_df()
df_genes.shape
net.load_df(df_genes)
net.cluster()
net.widget()
# generate random matrix
num_rows = 500
num_cols = 10
np.random.seed(seed=100)
mat = np.random.rand(num_rows, num_cols)
# make row and col labels
rows = range(num_rows)
cols = range(num_cols)
rows = [str(i) for i in rows]
cols = [str(i) for i in cols]
# make dataframe
df = pd.DataFrame(data=mat, columns=cols, index=rows)
net.load_df(df)
net.cluster()
net.widget()
df_random = net.widget_df()
df_random.shape
net.load_df(df_random)
net.cluster()
net.widget()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make widget using new API
Step2: Above, we have filtered the matrix to a region of interest using the brush cropping tool. Below we will get export this region of interest, defined on the front end, to a DataFrame, df_genes. This demonstrates the two-way communication capabilities of widgets.
Step3: Above, we made a new widget visualizing this region of interest.
Step4: Above, we selected a region of interest using the front-end brush crop tool and export to DataFrame, df_random. Below we will visualize it using a new widget.
|
4,757
|
<ASSISTANT_TASK:>
Python Code:
## you can inspect the autosave code to see what it does.
%autosave??
profile_dir = ! ipython locate
profile_dir = profile_dir[0]
profile_dir
import os.path
custom_js_path = os.path.join(profile_dir,'profile_default','static','custom','custom.js')
# my custom js
with open(custom_js_path) as f:
for l in f:
print l,
%%javascript
var CellToolbar = IPython.CellToolbar
var toggle = function(div, cell) {
var button_container = $(div)
// let's create a button that show the current value of the metadata
var button = $('<button/>').addClass('btn btn-mini').text(String(cell.metadata.foo));
// On click, change the metadata value and update the button label
button.click(function(){
var v = cell.metadata.foo;
cell.metadata.foo = !v;
button.text(String(!v));
})
// add the button to the DOM div.
button_container.append(button);
}
// now we register the callback under the name foo to give the
// user the ability to use it later
CellToolbar.register_callback('tuto.foo', toggle);
%%javascript
IPython.CellToolbar.register_preset('Tutorial 1',['tuto.foo','default.rawedit'])
IPython.CellToolbar.register_preset('Tutorial 2',['slideshow.select','tuto.foo'])
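%%javascript
// Optionally activate one of the presets registered above from code instead of
// the Cell Toolbar dropdown; uses the classic-notebook CellToolbar API.
IPython.CellToolbar.activate_preset('Tutorial 1');
IPython.CellToolbar.global_show();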
%load soln/celldiff.js
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: custom.js
Step2: and custom.js is located at
Step3: Note that custom.js is meant to be modified by the user; when writing a script, you can define it in a separate file and add a line of configuration to custom.js that fetches and executes that file.
Step4: Registering a preset
Step5: You should now have access to two presets
|
4,758
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
import socnet as sn
sn.graph_width = 320
sn.graph_height = 180
g = sn.load_graph('5-kruskal.gml', has_pos=True)
for e in g.edges_iter():
g.edge[e[0]][e[1]]['label'] = g.edge[e[0]][e[1]]['c']
sn.show_graph(g, elab=True)
class Forest(object):
    # Disjoint-set forest (union-find) with union by rank and path compression,
    # used by Kruskal's algorithm to detect whether an edge would close a cycle.
    def __init__(self, g):
        self.g = g
        self.f = set()
        for n in g.nodes():
            self._make_set(n)
    def _make_set(self, x):
        self.g.node[x]['p'] = x
        self.g.node[x]['rank'] = 0
    def _union(self, x, y):
        self._link(self._find_set(x), self._find_set(y))
    def _link(self, x, y):
        if self.g.node[x]['rank'] > self.g.node[y]['rank']:
            self.g.node[y]['p'] = x
        else:
            self.g.node[x]['p'] = y
            if self.g.node[x]['rank'] == self.g.node[y]['rank']:
                self.g.node[y]['rank'] = self.g.node[y]['rank'] + 1
    def _find_set(self, x):
        # path compression: point x directly at its root
        if x != self.g.node[x]['p']:
            self.g.node[x]['p'] = self._find_set(self.g.node[x]['p'])
        return self.g.node[x]['p']
    def adding_does_not_form_circuit(self, n, m):
        return self._find_set(n) != self._find_set(m)
    def add(self, n, m):
        self.f.add((n, m))
        self._union(n, m)
from math import inf, isinf
def snapshot(g, frames):
frame = sn.generate_frame(g, nlab=False, elab=True)
frames.append(frame)
red = (255, 0, 0)
blue = (0, 0, 255)
green = (0, 255, 0)
frames = []
f = Forest(g)
edges = []
e = g.edges_iter()
for i in e:
edges.append((i[0],i[1],g.get_edge_data(i[0],i[1])['c']))
edges.sort(reverse = True, key=lambda x: (-x[2],x[0]))
sn.reset_node_colors(g)
sn.reset_edge_colors(g)
snapshot(g, frames)
for n,m,c in edges:
g.edge[m][n]['color'] = green
snapshot(g, frames)
if(f.adding_does_not_form_circuit(n,m)):
g.edge[m][n]['color'] = blue
snapshot(g, frames)
f.add(n,m)
else:
g.edge[m][n]['color'] = sn.edge_color
snapshot(g, frames)
sn.show_animation(frames)
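# Quick check: total weight of the spanning tree collected in f.f above.
print('total MST weight:', sum(g.edge[n][m]['c'] for (n, m) in f.f))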
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, let's configure the visual properties
Step2: Finally, let's load and visualize a graph
Step3: Minimum spanning trees
Step4: Exercise
|
4,759
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# K is the delivery price agreed upon in the contract
K = 50
# Here we look at various different values that S_T can have
S_T = np.linspace(0, 100, 200)
# Calculate the long and short payoffs
long_payoff = S_T - K
short_payoff = K - S_T
plt.plot(S_T, long_payoff)
plt.axhline(0, color='black', alpha=0.3)
plt.axvline(0, color='black', alpha=0.3)
plt.xlim(0, 100)
plt.ylim(-100, 100)
plt.axvline(K, linestyle='dashed', color='r', label='K')
plt.ylabel('Payoff')
plt.xlabel('$S_T$')
plt.title('Payoff of a Long Forward Contract')
plt.legend();
plt.plot(S_T, short_payoff);
plt.axhline(0, color='black', alpha=0.3)
plt.axvline(0, color='black', alpha=0.3)
plt.xlim(0, 100)
plt.ylim(-100, 100)
plt.axvline(K, linestyle='dashed', color='r', label='K')
plt.ylabel('Payoff')
plt.xlabel('$S_T$')
plt.title('Payoff of a Short Forward Contract')
plt.legend();
contract = symbols('CNH17')
futures_position_value = get_pricing(contract, start_date = '2017-01-19', end_date = '2017-02-15', fields = 'price')
futures_position_value.name = futures_position_value.name.symbol
futures_position_value.plot()
plt.title('Corn Futures Price')
plt.xlabel('Date')
plt.ylabel('Price');
initial_margin = 990
maintenance_margin = 900
contract_count = 5
# We hit two margin calls over this time period
margin_account_changes = futures_position_value.diff()*contract.multiplier*contract_count
margin_account_changes[0] = initial_margin*contract_count
margin_account_balance = margin_account_changes.cumsum()
margin_account_balance.name = 'Margin Account Balance'
# First margin call
margin_call_idx = np.where(margin_account_balance < maintenance_margin*contract_count)[0][0]
margin_deposit = initial_margin*contract_count - margin_account_balance[margin_call_idx]
margin_account_balance[margin_call_idx+1:] = margin_account_balance[margin_call_idx+1:] + margin_deposit
# Second margin call
second_margin_call_idx = np.where(margin_account_balance < maintenance_margin*contract_count)[0][1]
second_margin_deposit = initial_margin*contract_count - margin_account_balance[second_margin_call_idx]
margin_account_balance[second_margin_call_idx+1:] = margin_account_balance[second_margin_call_idx+1:] + second_margin_deposit
(futures_position_value*contract.multiplier).plot()
margin_account_balance.plot()
plt.axvline(margin_account_balance.index[margin_call_idx], color='r', linestyle='--')
plt.axvline(margin_account_balance.index[second_margin_call_idx], color='r', linestyle='--')
plt.axhline(maintenance_margin*contract_count, color='r', linestyle='--')
plt.title('Overall Value of a Futures Contract with the Margin Account Balance')
plt.xlabel('Date')
plt.ylabel('Value')
plt.legend();
contracts = symbols(['ESH17', 'NGH17'])
volume_comparison = get_pricing(contracts, start_date = '2016-12-01', end_date = '2017-04-01', fields = 'volume')
volume_comparison.plot()
plt.title('Volume of S&P 500 E-Mini and Natural Gas Contracts for March Delivery')
plt.xlabel('Date')
plt.ylabel('Volume');
print volume_comparison.max()
cls = symbols(['CLF16', 'CLG16', 'CLH16'])
contract_volume = get_pricing(cls, start_date='2015-10-01', end_date='2016-04-01', fields='volume')
contract_volume.plot()
plt.title('Volume of Contracts with Different Expiry')
plt.xlabel('Date')
plt.ylabel('Volume');
cl_january_contract = symbols('CLF16')
print cl_january_contract.expiration_date
es_march_contract = symbols('ESH17')
print es_march_contract.expiration_date
assets = ['SPY', 'ESH16']
prices = get_pricing(assets, start_date = '2015-01-01', end_date = '2016-04-15', fields = 'price')
prices.columns = map(lambda x: x.symbol, prices.columns)
prices['ESH16'].plot()
(10*prices['SPY']).plot()
plt.legend()
plt.title('Price of a S&P 500 E-Mini Contract vs SPY')
plt.xlabel('Date')
plt.ylabel('Price');
X = (10*prices['SPY'][:'2016-03-15'] - prices['ESH16'][:'2016-03-15'])**2
X.plot()
plt.title('MSE of SPY and ESH16')
plt.xlabel('Date')
plt.ylabel('MSE');
contracts = symbols(['CLF17', 'CLG17', 'CLH17', 'CLJ17'])
prices = get_pricing(contracts, start_date='2016-11-01', end_date='2016-12-15', fields='price')
prices.columns = map(lambda x: x.symbol, prices.columns)
prices.plot();
# A toy example to show Contango
N = 100 # Days to expiry of futures contract
cost_of_carry = 0.01
spot_price = pd.Series(np.ones(N), name = "Spot Price")
futures_price = pd.Series(np.ones(N), name = "Futures Price")
spot_price[0] = 20
futures_price[0] = spot_price[0]*np.exp(cost_of_carry*N)
for n in range(1, N):
spot_price[n] = spot_price[n-1]*(1 + np.random.normal(0, 0.05))
futures_price[n] = spot_price[n]*np.exp(cost_of_carry*(N - n))
spot_price.plot()
futures_price.plot()
plt.legend()
plt.title('Contango')
plt.xlabel('Time')
plt.ylabel('Price');
# A toy example to show Backwardation
N = 100 # Days to expiry of futures contract
cost_of_carry = -0.01
spot_price = pd.Series(np.ones(N), name = "Spot Price")
futures_price = pd.Series(np.ones(N), name = "Futures Price")
spot_price[0] = 20
futures_price[0] = spot_price[0]*np.exp(cost_of_carry*N)
for n in range(1, N):
spot_price[n] = spot_price[n-1]*(1 + np.random.normal(0, 0.05))
futures_price[n] = spot_price[n]*np.exp(cost_of_carry*(N - n))
spot_price.plot()
futures_price.plot()
plt.legend()
plt.title('Backwardation')
plt.xlabel('Time')
plt.ylabel('Price');
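# Following the basis convention used in the text (futures minus spot), the
# basis is negative under backwardation and shrinks to zero at expiry.
basis = futures_price - spot_price
basis.plot()
plt.axhline(0, color='black', alpha=0.3)
plt.title('Basis under Backwardation')
plt.xlabel('Time')
plt.ylabel('Basis');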
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Derivatives
Step2: This is the long side payoff
Step3: And this is the short side payoff
Step4: For a long position on a forward contract, you benefit if the price at expiry is greater than the delivery price, while the opposite holds with a short position. However, even if you do not make a profit on your position there can be advantages. A forward contract locks in a price for a transaction, removing any uncertainty that you may have about a sale or purchase in the future. This is advantageous in cases where you know what you will need at some point in the future (or have a good idea of what you will need due to your models).
Step5: The plot shows some signifiant decreases in price over the chosen time period, which should be reflected by drops in the margin account.
Step6: Here we calculate when a margin call would occur as the futures price and margin account balance change.
Step7: The jump in the margin account balance that occurs after each vertical dotted line is the point at which we meet the margin call, increasing the margin account balance to our initial margin once more. Note that the lagged response to the second theoretical margin call in this example is due to a weekend. Notice how a small perturbations in the futures price lead to large changes in the margin account balance. This is a consequence of the inherent leverage.
Step8: The S&P 500 E-Mini contract has a value based on 50 units of the value of the S&P 500 Index. This financial future has a significant advantage in liquidity compared to natural gas for the same expiry. It likely helps that the S&P 500 E-Mini cash-settled, while the natural gas contract requires arrangements to be made for transportation and storage of fuel, but the main takeaway here is that there are a lot more people trying to trade financial futures.
Step9: As one contract fades out of the spotlight, the contract for the next month fades in. It is common practice to roll over positions in contracts, closing the previous month's positions and opening up equivalent positions in the next set of contracts. Note that when you create a futures object, you can access the expiration_date attribute to see when the contract will stop trading.
Step10: The expiration date for this crude oil contract is in December, but the delivery does not occur until January. This time lag between expiration and delivery varies for different underlyings. For example, the S&P 500 E-Mini contract, a financial future, has an expiration date in the same month as its delivery.
Step11: Spot Prices and Futures Prices
Step12: Looking at a plot of the prices does not tell us very much, unfortunately. It looks like the values might be getting closer, but we cannot quite tell. Let's look instead at the mean squared error between the ETF and futures prices.
Step13: This indeed seems to corroborate the point that futures prices approach the spot at expiry. And this makes sense. If we are close to expiry, there should be little difference between the price of acquiring a commodity or asset now and the price at the expiry date.
Step14: Contango and Backwardation
Step15: Backwardation occurs when the spot price is above the futures price and we have a negative basis. What this means is that it is cheaper to buy something right now than it would be to lock down for the future. This equates to a negative cost of carry.
|
4,760
|
<ASSISTANT_TASK:>
Python Code:
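# Assumed setup, not shown in this excerpt: the sklearn digits dataset, so that
# X, y, zeroes, and plt exist for the cells below.
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
digits = load_digits()
X, y = digits.data, digits.target
zeroes = [X[i] for i in range(len(y)) if y[i] == 0]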
print(zeroes[0])
from sklearn.decomposition import PCA
both = [X[i] for i in range(len(y)) if y[i] == 0 or y[i] == 1]
labels = [y_ for y_ in y if y_ == 0 or y_ == 1]
pca = PCA(n_components=3)
Xproj3d = pca.fit_transform(both)
print(Xproj3d[labels.index(0)]) # labels.index(0) gives us the first index that is 0, i.e., what used to be zeroes[0]
print(Xproj3d)
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax3d = Axes3D(fig)
ax3d.set_xlabel('X')
ax3d.set_ylabel('Y')
ax3d.set_zlabel('Z')
ax3d.view_init(elev=25., azim=120)
colors = ['orange' if y == 0 else 'lightblue' for y in labels]
ax3d.scatter(Xproj3d.T[0], Xproj3d.T[1], Xproj3d.T[2], c=colors, s=30, alpha=0.7)
pca = PCA(n_components=2)
Xproj2d = pca.fit_transform(both)
plt.scatter(Xproj2d.T[0], Xproj2d.T[1], c=colors, s=30, alpha=0.7)
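# Illustrative addition (not part of the original notebook): check how much of the
# total variance the two retained components explain; `pca` is the 2-component fit above.
print(pca.explained_variance_ratio_)
print("total variance explained:", pca.explained_variance_ratio_.sum())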
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How can we visualize the distribution of these points in $\mathbb{R}^{64}$? We need to approximate the relative positions of the points in 1, 2 or 3 dimensions in order to get a sense for how they are distributed. We can do this with principal components analysis (PCA). The overarching idea here is that our points approximate some high dimensional manifold (a manifold with dimensionality less than or equal to 64), and we can approximate this approximation with dimensionality reduction techniques such as PCA. If this is all very confusing to you, don't worry about it! I'm purposely being vague in my description - a more thorough explanation would require a more rigorous treatment of the math behind PCA.
Step2: zeroes[0] is now approximated by this point in $\mathbb{R}^3$
Step3: Of course, this is completely meaningless without the rest of the points for context.
Step4: The 0s are an amber color and the 1s are turquoise in the above plot. It looks like they form two (three?) distinct clusters! How does the two-dimensional projection look?
|
4,761
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
# npoints uniformly randomly distributed points in the interval [0,3]
npnts =100
x = np.random.uniform(0.,3.,npnts)
# set y = mx + b plus random noise of size err
slope = 2.
intercept = 1.
err = .5
y = slope*x + intercept
y = np.random.normal(loc=y,scale=err)
# add some random noise to x variable as well
x = np.random.normal(loc=x,scale=err)
# And plot out the data
plt.figure()
plt.scatter(x,y)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Data')
plt.grid()
plt.show()
# Vandermonde matrix
A = np.array([ np.ones(x.shape), x]).T
# solve Ac = y using the QR decomposition via scipy
c_ls,res,rank,s = la.lstsq(A,y)
print 'Best fit Linear Least Squares:'
print ' slope={}'.format(c_ls[1])
print ' intercept={}'.format(c_ls[0])
# Data matrix
X = np.array([ x , y]).T
X_mean = np.mean(X,0)
print 'Mean of data matrix=',X_mean
# de-mean the data matrix
X -= X_mean
# now calculate the SVD of the de-meaned data matrix
U,S,VT = la.svd(X,full_matrices=False)
V = VT.T
print 'Singular values=', S
print 'First Right singular vector V=', V[:,0]
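# Illustrative addition (not in the original): the total-least-squares line runs along the
# first right singular vector through the mean, so (assuming a nonzero x-component) its
# slope and intercept can be read off directly.
slope_tls = V[1,0]/V[0,0]
intercept_tls = X_mean[1] - slope_tls*X_mean[0]
print('Total Least Squares: slope={} intercept={}'.format(slope_tls, intercept_tls))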
# dummy variables
t_ls = np.linspace(0,x.max())
t_svd = 2*(t_ls - np.mean(t_ls))
# make figure
plt.figure()
# plot data
plt.scatter(x,y)
# plot the least squares solution
plt.plot(t_ls,c_ls[0]+t_ls*c_ls[1],'r-',label='Least Squares')
# plot the total least Squares solution
# plot the mean
plt.plot(X_mean[0],X_mean[1],'go')
# calculate a line through the mean with the first principal component as a basis
L_tls = X_mean + np.outer(t_svd,V[:,0])
plt.plot(L_tls[:,0],L_tls[:,1],'c-',label='Total Least Squares')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Comparison Least Squares vs Total Least Squares')
plt.legend(loc='best')
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Total Least Squares
Step2: Classical Least Squares
Step3: Total Least Squares
Step4: Now plot and compare the two solutions
|
4,762
|
<ASSISTANT_TASK:>
Python Code:
y=np.linspace(-2,3,100)
x=np.exp(y)
plt.plot(x,y)
plt.xlabel('$x$')
plt.ylabel('$y=\ln x$')
plt.show()
plt.semilogx(x,y)
plt.xlabel('$x$')
plt.ylabel('$y=\ln x$')
plt.show()
x=np.logspace(0,10,100)
y=np.log(x)
plt.semilogx(x,y)
plt.semilogx(1/x,-y)
plt.xlabel('$x$')
plt.ylabel('$y=\ln x$')
plt.show()
x0=np.logspace(-5,5,1000,dtype=np.double)
epsilon=np.finfo(np.double).eps
best_precision=(epsilon/2)*np.abs(1./np.log(x0))
plt.loglog(x0,best_precision, '-k')
plt.loglog(x0,np.full(x0.shape, epsilon), '--r')
plt.xlabel("$Аргумент$")
plt.ylabel("$Относительная\,погрешность$")
plt.legend(["$Минимальная\,погр.$","$Машинная\,погр.$"])
plt.show()
def relative_error(x0,x): return np.abs(x0-x)/np.abs(x0)
def log_teylor_series(x, N=5):
a=x-1
a_k=a # x в степени k. Сначала k=1
y=a # Значене логарифма, пока для k=1.
for k in range(2,N): # сумма по степеням
a_k=-a_k*a # последовательно увеличиваем степень и учитываем множитель со знаком
y=y+a_k/k
return y
x=np.logspace(-5,1,1001)
y0=np.log(x)
y=log_teylor_series(x)
plt.loglog(x,relative_error(y0,y),'-k')
plt.loglog(x0,best_precision,'--r')
plt.xlabel('$x$')
plt.ylabel('$(y-y_0)/y_0$')
plt.legend(["$Достигнутая\;погр.$", "$Минимальная\;погр.$"],loc=5)
plt.show()
# Узлы итерполяции
N=5
xn=1+1./(1+np.arange(N))
yn=np.log(xn)
# Тестовые точки
x=np.linspace(1+1e-10,2,1000)
y=np.log(x)
# Многочлен лагранжа
import scipy.interpolate
L=scipy.interpolate.lagrange(xn,yn)
yl=L(x)
plt.plot(x,y,'-k')
plt.plot(xn,yn,'.b')
plt.plot(x,yl,'-r')
plt.xlabel("$x$")
plt.ylabel("$y=\ln x$")
plt.show()
plt.semilogy(x,relative_error(y,yl))
plt.xlabel("$Аргумент$")
plt.ylabel("$Относительная\;погрешность$")
plt.show()
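# Note added for clarity: the update in log_newton below is Newton's method applied to
# f(y) = exp(y) - x, i.e. y_{k+1} = y_k - (exp(y_k) - x)/exp(y_k) = y_k - 1 + x/exp(y_k).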
def log_newton(x, N=10):
y=1 # начальное приближение
for j in range(N):
y=y-1+x/np.exp(y)
return y
x=np.logspace(-3,3,1000)
y0=np.log(x)
y=log_newton(x)
plt.loglog(x,relative_error(y0,y),'-k')
plt.xlabel("$Аргумент$")
plt.ylabel("$Относительная\;погрешность$")
plt.show()
B=8 # число используемых для составления таблицы бит мантиссы
table=np.log((np.arange(0,2**B, dtype=np.double)+0.5)/(2**B))
log2=np.log(2)
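# Note added for clarity: np.frexp writes x = M * 2**E with M in [0.5, 1), so
# log(x) = E*log(2) + log(M); log(M) is looked up in the precomputed table,
# indexed by the leading B bits of the mantissa.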
def log_table(x):
M,E=np.frexp(x)
return log2*E+table[(M*2**B).astype(np.int)]
x=np.logspace(-10,10,1000)
y0=np.log(x)
y=log_table(x)
plt.loglog(x,relative_error(y0,y),'-k')
plt.xlabel("$Аргумент$")
plt.ylabel("$Относительная\;погрешность$")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A logarithmic scale is often used to display data graphically; on such a scale, points that lie the same distance apart differ by the same factor.
Step2: The logarithm turns multiplication into addition
Step3: Exercise 1. Perform argument reduction for the logarithm so that the values always fall in the interval $[1,1+\epsilon)$, where $\epsilon$ is a small positive number. Which property is preferable to use: $\ln x^2=2\ln x$ or $\ln \frac{x}{2}=\ln x-\ln 2$?
Step4: Formally, at $x=1$ the condition number is infinite (since the value of the function is $0$), but this peak is very narrow, so the values can be computed to machine precision almost everywhere, except in a narrow neighborhood of $x=1$.
Step5: The Taylor expansion gives an accurate approximation of the function only near the expansion point (in this case $x=1$), which is exactly what we observe in the experiment.
Step6: As we can see, the error goes to zero at the interpolation nodes, and between the nodes it does not grow above a certain level, i.e. from the point of view of evaluating the function this approximation is much better.
Step7: Exercise 4. The initial guess in the algorithm above is chosen very crudely; propose a better one. Estimate the number of iterations needed to reach the best possible accuracy. Implement Newton's method with that number of iterations. Did you obtain machine precision? Why? Why do the iterations diverge for $x$ noticeably different from 1 when 1 is used as the initial guess?
|
4,763
|
<ASSISTANT_TASK:>
Python Code:
# Run this cell, but please don't change it.
# These lines import the Numpy and Datascience modules.
import numpy as np
from datascience import *
# These lines do some fancy plotting magic
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import warnings
warnings.simplefilter('ignore', FutureWarning)
from matplotlib import patches
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# These lines load the tests.
from client.api.assignment import load_assignment
tests = load_assignment('lab08.ok')
# Just run this cell. (The simulation is actually not
# that complicated; it just takes a lot of code to draw
# everything. So you don't need to read this unless you
# have time and are curious about more advanced plotting.)
num_locations = 15
example_velocities = Table().with_columns(
"x", np.random.normal(size=num_locations),
"y", np.random.normal(size=num_locations))
start_of_time = -2
def scatter_after_time(t, start_of_time, end_of_time, velocities, center_name, other_point_name, make_title):
max_location = 1.1*(end_of_time-start_of_time)*max(max(abs(velocities.column("x"))), max(abs(velocities.column("y"))))
new_locations = velocities.with_columns(
"x", (t-start_of_time)*velocities.column("x"),
"y", (t-start_of_time)*velocities.column("y"))
plt.scatter(make_array(0), make_array(0), label=center_name, s=100, c="yellow")
plt.scatter(new_locations.column("x"), new_locations.column("y"), label=other_point_name)
for i in np.arange(new_locations.num_rows):
plt.arrow(
new_locations.column("x").item(i),
new_locations.column("y").item(i),
velocities.column("x").item(i),
velocities.column("y").item(i),
fc='black',
ec='black',
head_width=0.025*max_location,
lw=.15)
plt.xlim(-max_location, max_location)
plt.ylim(-max_location, max_location)
plt.gca().set_aspect('equal', adjustable='box')
plt.gca().set_position(make_array(0, 0, 1, 1))
plt.legend(bbox_to_anchor=(1.6, .7))
plt.title(make_title(t))
plt.show()
interact(
scatter_after_time,
t=widgets.FloatSlider(min=start_of_time, max=5, step=.05, value=0, msg_throttle=1),
start_of_time=fixed(start_of_time),
end_of_time=fixed(5),
velocities=fixed(example_velocities),
center_name=fixed("our sun"),
other_point_name=fixed("other star"),
make_title=fixed(lambda t: "The world {:01g} year{} in the {}".format(abs(t), "" if abs(t) == 1 else "s", "past" if t < 0 else "future")));
# Run this cell to see a picture of Mei's locations over time.
mei_velocity = Table().with_columns("x", make_array(60), "y", make_array(0))
interact(
scatter_after_time,
t=widgets.FloatSlider(min=-2, max=1, step=.05, value=0, msg_throttle=1),
start_of_time=fixed(-2),
end_of_time=fixed(1),
velocities=fixed(mei_velocity),
center_name=fixed("Us"),
other_point_name=fixed("Mei"),
make_title=fixed(lambda t: "Mei's position {:01g} hour{} in the {}".format(abs(t), "" if abs(t) == 1 else "s", "past" if t < 0 else "future")));
# Just run this cell.
small_driving_example = Table().with_columns(
"Name", make_array("Us", "Mei"),
"Speed moving away from us (miles per hour)", make_array(0, 60),
"Current distance from us (miles)", make_array(0, 120))
small_driving_example.scatter(1, 2, s=200, fit_line=True)
# Fancy magic to draw each person's name with their dot.
with_slope_indicator = small_driving_example.with_row(
["Slope = 2\ hours", small_driving_example.column(1).mean(), small_driving_example.column(2).mean()])
for i in range(with_slope_indicator.num_rows):
name = with_slope_indicator.column(0).item(i)
x = with_slope_indicator.column(1).item(i)
y = with_slope_indicator.column(2).item(i)
plt.scatter(make_array(x - 15), make_array(y + 15), s=1000*len(name), marker="$\mathrm{" + name + "}$")
# Just run this cell.
Table.read_table("drivers.csv").scatter(0, 1, fit_line=True)
# Fill in the start time you infer from the above line.
driving_start_time_hours = ...
driving_start_time_hours
_ = tests.grade('q3')
# Just run this cell.
close_novas = Table.read_table("close_novas.csv")
close_novas.scatter(0, 1, fit_line=True)
close_novas
# Fill this in manually by examining the line above.
first_guess_universe_age_years = ...
# This just shows your guess as a nice string, in billions of years.
"{:,} billion years".format(round(first_guess_universe_age_years / 1e9, 2))
_ = tests.grade('q4')
def errors(tbl, slope, intercept):
...
return ...
example_errors = ...
...
_ = tests.grade('q6')
def fit_line(tbl):
# Your code may need more than 1 line below here.
...
slope = ...
intercept = ...
return make_array(slope, intercept)
# Here is an example call to your function. To test your function,
# figure out the right slope and intercept by hand.
example_table = Table().with_columns(
"Speed (parsecs/year)", make_array(0, 1),
"Distance (million parsecs)", make_array(1, 3))
fit_line(example_table)
best_line = ...
best_line_slope = ...
best_line_intercept = ...
# This just shows your answer as a nice string, in billions of years.
"Slope: {:g} (corresponding to an estimated age of {:,} billion years)".format(best_line_slope, round(best_line_slope/1000, 4))
bootstrap_ages = make_array()
for i in np.arange(1000):
bootstrap_ages = ...
lower_end = ...
upper_end = ...
Table().with_column("Age estimate", bootstrap_ages*1e-9).hist(bins=np.arange(12, 16, .1), unit="billion years")
print("95% confidence interval for the age of the universe: [{:g}, {:g}] billion years".format(lower_end*1e-9, upper_end*1e-9))
# For your convenience, you can run this cell to run all the tests at once!
import os
_ = [tests.grade(q[:-3]) for q in os.listdir("tests") if q.startswith('q')]
# Run this cell to submit your work *after* you have passed all of the test cells.
# It's ok to run this cell multiple times. Only your final submission will be scored.
!TZ=America/Los_Angeles jupyter nbconvert --output=".lab08_$(date +%m%d_%H%M)_submission.html" lab08.ipynb && echo "Submitted successfully."
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Actual Big Bang Theory
Step2: Question 1
Step3: We want to know how long she's been driving, but we forgot to record the time when she left. If we find out that she's 120 miles away, and she's been going 60 miles per hour the whole time, we can infer that she left 2 hours ago.
Step4: The slope of the line is 2 hours. (The units are vertical-axis units divided by horizontal-axis units, which are $\frac{\texttt{miles}}{\texttt{miles} / \texttt{hour}}$, or hours.) So that's our answer.
Step5: Question 3
Step6: Back to cosmology
Step7: Question 4
Step8: Fitting the line yourself
Step9: Question 6
Step10: You should find that the errors are almost all negative. That means our line is a little bit too steep. Let's find a better one.
Step11: Question 8
Step12: That slope (multiplied by 1 million) is an estimate of the age of the universe. The current best estimate of the age of the universe (using slightly more sophisticated techniques) is 13.799 billion years. Did we get close?
|
4,764
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.ml.feature import StandardScaler
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import StringIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from distkeras.transformers import LabelIndexTransformer
from distkeras.predictors import ModelPredictor
from distkeras.trainers import SingleTrainer
from distkeras.trainers import AEASGD
from distkeras.trainers import DOWNPOUR
# Modify these variables according to your needs.
application_name = "Distributed Deep Learning: Analysis"
using_spark_2 = False
local = False
if local:
# Tell master to use local resources.
master = "local[*]"
num_cores = 3
num_executors = 1
else:
# Tell master to use YARN.
master = "yarn-client"
num_executors = 8
num_cores = 2
# This variable is derived from the number of cores and executors, and will be used to assign the number of model trainers.
num_workers = num_executors * num_cores
print("Number of desired executors: " + `num_executors`)
print("Number of desired cores / executor: " + `num_cores`)
print("Total number of workers: " + `num_workers`)
conf = SparkConf()
conf.set("spark.app.name", application_name)
conf.set("spark.master", master)
conf.set("spark.executor.cores", `num_cores`)
conf.set("spark.executor.instances", `num_executors`)
conf.set("spark.executor.memory","2g")
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
# Check if the user is running Spark 2.0 +
if using_spark_2:
sc = SparkSession.builder.config(conf=conf) \
.appName(application_name) \
.getOrCreate()
else:
# Create the Spark context.
sc = SparkContext(conf=conf)
# Add the missing imports
from pyspark import SQLContext
sqlContext = SQLContext(sc)
# Check if we are using Spark 2.0
if using_spark_2:
reader = sc
else:
reader = sqlContext
# Read the dataset.
raw_dataset = reader.read.parquet("data/processed.parquet")
# Check the schema.
raw_dataset.printSchema()
nb_features = len(raw_dataset.select("features_normalized").take(1)[0]["features_normalized"])
nb_classes = len(raw_dataset.select("label").take(1)[0]["label"])
print("Number of features: " + str(nb_features))
print("Number of classes: " + str(nb_classes))
# Finally, we create a trainingset and a testset.
(training_set, test_set) = raw_dataset.randomSplit([0.7, 0.3])
training_set.cache()
test_set.cache()
# Distribute the training and test set to the workers.
test_set = test_set.repartition(num_workers)
training_set = training_set.repartition(num_workers)
num_test_set = test_set.count()
num_training_set = training_set.count()
print("Number of testset instances: " + str(num_test_set))
print("Number of trainingset instances: " + str(num_training_set))
print("Total number of instances: " + str(num_test_set + num_training_set))
model = Sequential()
model.add(Dense(500, input_shape=(nb_features,)))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dropout(0.6))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# Summarize the model.
model.summary()
optimizer = 'adagrad'
loss = 'categorical_crossentropy'
def evaluate(model):
global test_set
metric_name = "f1"
evaluator = MulticlassClassificationEvaluator(metricName=metric_name, predictionCol="prediction_index", labelCol="label_index")
# Clear the prediction column from the testset.
test_set = test_set.select("features_normalized", "label", "label_index")
# Apply a prediction from a trained model.
    # Use the model passed in as an argument rather than the global trained_model.
    predictor = ModelPredictor(keras_model=model, features_col="features_normalized")
test_set = predictor.predict(test_set)
# Transform the prediction vector to an indexed label.
index_transformer = LabelIndexTransformer(output_dim=nb_classes)
test_set = index_transformer.transform(test_set)
# Store the F1 score of the SingleTrainer.
score = evaluator.evaluate(test_set)
return score
results = {}
time_spent = {}
trainer = SingleTrainer(keras_model=model, loss=loss, worker_optimizer=optimizer,
features_col="features_normalized", num_epoch=1, batch_size=64)
trained_model = trainer.train(training_set)
# Fetch the training time.
dt = trainer.get_training_time()
print("Time spent (SingleTrainer): " + `dt` + " seconds.")
# Evaluate the model.
score = evaluate(trained_model)
print("F1 (SingleTrainer): " + `score`)
# Store the training metrics.
results['single'] = score
time_spent['single'] = dt
trainer = AEASGD(keras_model=model, worker_optimizer=optimizer, loss=loss, num_workers=num_workers, batch_size=64,
features_col="features_normalized", num_epoch=1, communication_window=32,
rho=5.0, learning_rate=0.1)
trainer.set_parallelism_factor(1)
trained_model = trainer.train(training_set)
# Fetch the training time.
dt = trainer.get_training_time()
print("Time spent (AEASGD): " + `dt` + " seconds.")
# Evaluate the model.
score = evaluate(trained_model)
print("F1 (AEASGD): " + `score`)
# Store the training metrics.
results['aeasgd'] = score
time_spent['aeasgd'] = dt
trainer = DOWNPOUR(keras_model=model, worker_optimizer=optimizer, loss=loss, num_workers=num_workers,
batch_size=64, communication_window=5, learning_rate=0.1, num_epoch=1,
features_col="features_normalized")
trainer.set_parallelism_factor(1)
trained_model = trainer.train(training_set)
# Fetch the training time.
dt = trainer.get_training_time()
print("Time spent (DOWNPOUR): " + `dt` + " seconds.")
# Evaluate the model.
score = evaluate(trained_model)
print("F1 (DOWNPOUR): " + `score`)
# Store the training metrics.
results['downpour'] = score
time_spent['downpour'] = dt
# Plot the time.
fig = plt.figure()
st = fig.suptitle("Lower is better.", fontsize="x-small")
plt.bar(range(len(time_spent)), time_spent.values(), align='center')
plt.xticks(range(len(time_spent)), time_spent.keys())
plt.xlabel("Optimizers")
plt.ylabel("Seconds")
plt.ylim([0, 7000])
plt.show()
# Plot the statistical performance of the optimizers.
fig = plt.figure()
st = fig.suptitle("Higer is better.", fontsize="x-small")
plt.bar(range(len(results)), results.values(), align='center')
plt.xticks(range(len(results)), results.keys())
plt.xlabel("Optimizers")
plt.ylabel("F1")
plt.ylim([0.83,0.85])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Spark Configuration and Preparation
Step2: Data Preparation
Step3: After reading the dataset from storage, we will extract several metrics such as nb_features, which basically is the number of input neurons, and nb_classes, which is the number of classes (signal and background).
Step4: Finally, we split up the dataset for training and testing purposes, and fetch some additional statistics on the number of training and testing instances.
Step5: Model construction
Step6: Model evaluation
Step7: Model training and evaluation
Step8: Asynchronous EASGD
Step9: DOWNPOUR
Step10: Results
|
4,765
|
<ASSISTANT_TASK:>
Python Code:
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - np.random.rand(8))
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
# plt.hold was removed from modern matplotlib; subsequent plot calls draw on the same axes by default.
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
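# Illustrative addition (not part of the original example): compare the in-sample R^2
# scores of the three fitted models.
print('RBF R^2:', svr_rbf.score(X, y))
print('Linear R^2:', svr_lin.score(X, y))
print('Polynomial R^2:', svr_poly.score(X, y))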
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate sample data
Step2: Add noise to targets
Step3: Fit regression model
Step4: look at the results
|
4,766
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Uninstall opencv-python to avoid a conflict (in Colab) with the opencv-python-headless package that tf-models uses.
!pip uninstall -y opencv-python
!pip install -U -q "tensorflow>=2.9.0" "tf-models-official"
import tensorflow_models as tfm
import orbit
import glob
import os
import pathlib
import tempfile
import time
import numpy as np
import tensorflow as tf
from official.nlp.data import sentence_prediction_dataloader
from official.nlp import optimization
logical_device_names = [logical_device.name for logical_device in tf.config.list_logical_devices()]
if 'GPU' in ''.join(logical_device_names):
strategy = tf.distribute.MirroredStrategy()
elif 'TPU' in ''.join(logical_device_names):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.OneDeviceStrategy(logical_device_names[0])
max_seq_length = 128
learning_rate = 3e-5
num_train_epochs = 3
train_batch_size = 32
eval_batch_size = 64
train_data_size = 3668
steps_per_epoch = int(train_data_size / train_batch_size)
train_steps = steps_per_epoch * num_train_epochs
warmup_steps = int(train_steps * 0.1)
print("train batch size: ", train_batch_size)
print("train epochs: ", num_train_epochs)
print("steps_per_epoch: ", steps_per_epoch)
model_dir = pathlib.Path(tempfile.mkdtemp())
print(model_dir)
with strategy.scope():
encoder_network = tfm.nlp.encoders.build_encoder(
tfm.nlp.encoders.EncoderConfig(type="bert"))
classifier_model = tfm.nlp.models.BertClassifier(
network=encoder_network, num_classes=2)
optimizer = optimization.create_optimizer(
init_lr=3e-5,
num_train_steps=steps_per_epoch * num_train_epochs,
num_warmup_steps=warmup_steps,
end_lr=0.0,
optimizer_type='adamw')
tf.keras.utils.plot_model(classifier_model)
bert_dir = 'gs://cloud-tpu-checkpoints/bert/v3/uncased_L-12_H-768_A-12/'
tf.io.gfile.listdir(bert_dir)
bert_checkpoint = bert_dir + 'bert_model.ckpt'
def init_from_ckpt_fn():
init_checkpoint = tf.train.Checkpoint(**classifier_model.checkpoint_items)
with strategy.scope():
(init_checkpoint
.read(bert_checkpoint)
.expect_partial()
.assert_existing_objects_matched())
with strategy.scope():
init_from_ckpt_fn()
checkpoint = tf.train.Checkpoint(model=classifier_model, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=model_dir,
max_to_keep=5,
step_counter=optimizer.iterations,
checkpoint_interval=steps_per_epoch,
init_fn=init_from_ckpt_fn)
train_data_path = "gs://download.tensorflow.org/data/model_garden_colab/mrpc_train.tf_record"
eval_data_path = "gs://download.tensorflow.org/data/model_garden_colab/mrpc_eval.tf_record"
def _dataset_fn(input_file_pattern,
global_batch_size,
is_training,
input_context=None):
data_config = sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=input_file_pattern,
seq_length=max_seq_length,
global_batch_size=global_batch_size,
is_training=is_training)
return sentence_prediction_dataloader.SentencePredictionDataLoader(
data_config).load(input_context=input_context)
train_dataset = orbit.utils.make_distributed_dataset(
strategy, _dataset_fn, input_file_pattern=train_data_path,
global_batch_size=train_batch_size, is_training=True)
eval_dataset = orbit.utils.make_distributed_dataset(
strategy, _dataset_fn, input_file_pattern=eval_data_path,
global_batch_size=eval_batch_size, is_training=False)
def loss_fn(labels, logits):
Classification loss.
labels = tf.squeeze(labels)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(
tf.cast(labels, dtype=tf.int32), depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(
tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1)
return tf.reduce_mean(per_example_loss)
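# Note (not from the original tutorial): the manual one-hot + log-softmax above amounts to
# sparse categorical cross-entropy on logits; a roughly equivalent expression would be
# tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)).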
def trainer_init(self,
train_dataset,
model,
optimizer,
strategy):
self.strategy = strategy
with self.strategy.scope():
self.model = model
self.optimizer = optimizer
self.global_step = self.optimizer.iterations
self.train_loss = tf.keras.metrics.Mean(
'training_loss', dtype=tf.float32)
orbit.StandardTrainer.__init__(self, train_dataset)
def train_loop_begin(self):
self.train_loss.reset_states()
def train_step(self, iterator):
def step_fn(inputs):
labels = inputs.pop("label_ids")
with tf.GradientTape() as tape:
model_outputs = self.model(inputs, training=True)
# Raw loss is used for reporting in metrics/logs.
raw_loss = loss_fn(labels, model_outputs)
# Scales down the loss for gradients to be invariant from replicas.
loss = raw_loss / self.strategy.num_replicas_in_sync
grads = tape.gradient(loss, self.model.trainable_variables)
optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
# For reporting, the metric takes the mean of losses.
self.train_loss.update_state(raw_loss)
self.strategy.run(step_fn, args=(next(iterator),))
def train_loop_end(self):
return {
self.train_loss.name: self.train_loss.result(),
}
class BertClassifierTrainer(orbit.StandardTrainer):
__init__ = trainer_init
train_loop_begin = train_loop_begin
train_step = train_step
train_loop_end = train_loop_end
def evaluator_init(self,
eval_dataset,
model,
strategy):
self.strategy = strategy
with self.strategy.scope():
self.model = model
self.eval_loss = tf.keras.metrics.Mean(
'evaluation_loss', dtype=tf.float32)
self.eval_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='accuracy', dtype=tf.float32)
orbit.StandardEvaluator.__init__(self, eval_dataset)
def eval_begin(self):
self.eval_accuracy.reset_states()
self.eval_loss.reset_states()
def eval_end(self):
return {
self.eval_accuracy.name: self.eval_accuracy.result(),
self.eval_loss.name: self.eval_loss.result(),
}
def eval_step(self, iterator):
def step_fn(inputs):
labels = inputs.pop("label_ids")
model_outputs = self.model(inputs, training=True)
loss = loss_fn(labels, model_outputs)
self.eval_loss.update_state(loss)
self.eval_accuracy.update_state(labels, model_outputs)
self.strategy.run(step_fn, args=(next(iterator),))
class BertClassifierEvaluator(orbit.StandardEvaluator):
__init__ = evaluator_init
eval_begin = eval_begin
eval_end = eval_end
eval_step = eval_step
trainer = BertClassifierTrainer(
train_dataset, classifier_model, optimizer, strategy)
evaluator = BertClassifierEvaluator(
eval_dataset, classifier_model, strategy)
controller = orbit.Controller(
trainer=trainer,
evaluator=evaluator,
global_step=trainer.global_step,
steps_per_loop=20,
checkpoint_manager=checkpoint_manager)
result = controller.train_and_evaluate(
train_steps=steps_per_epoch * num_train_epochs,
eval_steps=-1,
eval_interval=steps_per_epoch)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fine-tuning a BERT model with Orbit
Step2: The tf-models-official package contains both the orbit and tensorflow_models modules.
Step3: Setup for training
Step4: Configure the distribution strategy
Step5: For more information about the TPU setup, refer to the TPU guide.
Step6: Create a BERT Classifier model and a simple optimizer. They must be created inside strategy.scope so that the variables can be distributed.
Step7: Initialize from a Checkpoint
Step8: To use Orbit, create a tf.train.CheckpointManager object.
Step9: Create distributed datasets
Step11: Create a loss function
Step12:
Step13: Before starting a run of the training loop, the train_loop_begin method will reset the train_loss metric.
Step14: The train_step is a straight-forward loss-calculation and gradient update that is run by the distribution strategy. This is accomplished by defining the gradient step as a nested function (step_fn).
Step15: The orbit.StandardTrainer handles the @tf.function and loops.
Step16: Build a subclass of orbit.StandardTrainer with those methods.
Step17: Define the evaluator class
Step18: Similar to the trainer, the eval_begin and eval_end methods just need to reset the metrics before the loop and then report the results after the loop.
Step19: The eval_step method works like train_step. The inner step_fn defines the actual work of calculating the loss & accuracy and updating the metrics. The outer eval_step receives tf.distribute.DistributedIterator as input, and uses Strategy.run to launch the distributed execution to step_fn, feeding it from the distributed iterator.
Step20: Build a subclass of orbit.StandardEvaluator with those methods.
Step21: End-to-end training and evaluation
|
4,767
|
<ASSISTANT_TASK:>
Python Code:
print("Hello, World!")
print("\N{WAVING HAND SIGN}, \N{EARTH GLOBE ASIA-AUSTRALIA}!")
print("First this line is printed,")
print("and then this one.")
print("This line is missing something."
from client.api.notebook import Notebook
ok = Notebook('Intro.ok')
# Examples of expressions:
2 + 2
'me' + ' and I'
12 ** 2
6 + 4
a = 4
b = 10/5
# Notice that 'a' retains its value.
print(a)
a + b
# Fill in the missing lines to complete the expressions.
x = ...
...
...
print(...)
ok.grade('q01')
# an empty list
lst = []
print(lst)
# reassigning our empty list to a new list
lst = [1, 3, 6, 'lists', 'are', 'fun', 4]
print(lst)
# Elements are selected like this:
example = lst[2]
# The above line selects the 3rd element of lst (list indices are 0-offset) and sets it to a variable named example.
print(example)
### This line will store the first (inclusive) through fourth (exclusive) elements of lst as a new list called lst_2:
lst_2 = lst[1:4]
lst_2
### Fill in the ellipses to complete the question.
my_list = ...
my_list_sliced = my_list[...]
last_of_sliced = ...
print(...)
ok.grade('q02')
# A list containing six integers.
a_list = [1, 6, 4, 8, 13, 2]
# Another list containing six integers.
b_list = [4, 5, 2, 14, 9, 11]
print('Max of a_list:', max(a_list))
print('Min of b_list:', min(a_list))
# Concatenate a_list and b_list:
c_list = a_list + b_list
print('Concatenated:', c_list)
import numpy as np
# Initialize an array of integers 0 through 9.
example_array = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# This can also be accomplished using np.arange
example_array_2 = np.arange(10)
print('Undoubled Array:')
print(example_array_2)
# Double the values in example_array and print the new array.
double_array = example_array*2
print('Doubled Array:')
print(double_array)
example_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
example_list * 2
new_list = []
for element in example_array:
new_element = element + 5
new_list.append(new_element)
new_list
newer_list = []
for completely_arbitrary_name in example_array:
newer_element = completely_arbitrary_name + 5
newer_list.append(newer_element)
newer_list
for i in range(len(example_array)):
example_array[i] = example_array[i] + 5
example_array
while_array = np.arange(10) # Generate our array of values
print('Before:', while_array)
while(max(while_array) < 50): # Set our conditional
while_array[4] += 1 # Add 1 to the fifth element if the conditional is satisfied
print('After:', while_array)
# Make use of iterators, range, length, while loops, and indices to complete this question.
question_3 = np.array([12, 31, 50, 0, 22, 28, 19, 105, 44, 12, 77])
for i in range(len(...)):
while(...):
question_3[i] = ...
for element in question_3:
print(...)
ok.grade('q03')
# An adder function that adds 2 to the given n.
def add_two(n):
return n + 2
add_two(5)
def is_multiple(m, n):
if (m % n == 0):
return True
else:
return False
is_multiple(12, 4)
is_multiple(12, 7)
# Change possible_prime to any integer to test its primality
# NOTE: If you happen to stumble across a large (> 8 digits) prime number, the cell could take a very, very long time
# to run and will likely crash your kernel. Just click kernel>interrupt if it looks like it's caught.
possible_prime = 9999991
for i in range(2, possible_prime):
if (is_multiple(possible_prime, i)):
print(possible_prime, 'is not prime')
break
if (i >= possible_prime/2):
print(possible_prime, 'is prime')
break
def replace_with_y(lst, x, y):
for i in range(...):
if(...):
...
return lst
ok.grade('q04')
import pandas as pd
top_10_movies = pd.DataFrame(data=np.array(
[[9.2, 'The Shawshank Redemption (1994)'],
[9.2, 'The Godfather (1972)'],
[9., 'The Godfather: Part II (1974)'],
[8.9, 'Pulp Fiction (1994)'],
[8.9, "Schindler's List (1993)"],
[8.9, 'The Lord of the Rings: The Return of the King (2003)'],
[8.9, '12 Angry Men (1957)'],
[8.9, 'The Dark Knight (2008)'],
[8.9, 'Il buono, il brutto, il cattivo (1966)'],
[8.8, 'The Lord of the Rings: The Fellowship of the Ring (2001)']]), columns=["Rating", "Movie"])
top_10_movies
top_10_movies_dict = {"Rating" : [9.2, 9.2, 9., 8.9, 8.9, 8.9, 8.9, 8.9, 8.9, 8.8],
"Movie" : ['The Shawshank Redemption (1994)',
'The Godfather (1972)',
'The Godfather: Part II (1974)',
'Pulp Fiction (1994)',
"Schindler's List (1993)",
'The Lord of the Rings: The Return of the King (2003)',
'12 Angry Men (1957)',
'The Dark Knight (2008)',
'Il buono, il brutto, il cattivo (1966)',
'The Lord of the Rings: The Fellowship of the Ring (2001)']}
top_10_movies_2 = pd.DataFrame(data=top_10_movies_dict, columns=["Rating", "Movie"])
top_10_movies_2
# Run this cell to read in the table
vot = pd.read_csv("data/vots.csv")
vot.head()
# Run this cell and see what it outputs
vot["closure"]
vot["closure"][0]
## Note: .head() returns the first five rows of the table
vot[['pclo', 'tclo', 'kclo', 'pvot', 'tvot', 'kvot']].head()
vot[[0, 1, 2, 3]].head()
vot.drop("gender", axis=1).head()
vot[20:31]
vot[vot["height"] >= 170][["closure", "vot"]]
vot[(vot["language"] == "English") & ((vot["tvot"] < .01) | (vot["kvot"] > .08))]
vot.sort_values("height")
vot.sort_values("height", ascending=False)
vot["pvot"].min()
vot["pvot"].max()
vot["pvot"].mean()
%matplotlib inline
import matplotlib.pyplot as plt
vot.plot(x='pvot', y='kvot', kind='scatter')
closure = vot['tclo']
height = vot['height']
#Plot the data by inputting the x and y axis
plt.scatter(closure, height)
# we can then go on to customize the plot with labels
plt.xlabel("T Closure")
plt.ylabel("Height")
pclo = ...
kclo = ...
plt.scatter(pclo, kclo)
plt.xlabel(...)
plt.ylabel(...)
# note: plt.show() is the equivalent of print, but for graphs
plt.show()
ok.grade('q05')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And this one
Step2: The fundamental building block of Python code is an expression. Cells can contain multiple lines with multiple expressions. When you run a cell, the lines of code are executed in the order in which they appear. Every print expression prints a line. Run the next cell and notice the order of the output.
Step3: Understanding Check 2 Change the cell above so that it prints out
Step4: You should see something like this (minus our annotations)
Step5: Part 1
Step6: You will notice that only the last line in a cell gets printed out. If you want to see the values of previous expressions, you need to call print on that expression. Try adding print statements to some of the above expressions to get them to display.
Step7: Notice that when you create a variable, unlike what you previously saw with the expressions, it does not print anything out.
Step8: Question 1
Step9: Running the cell below will give you some feed back on your responses. Though the OK tests are not always comprehensive (passing all of the tests does not guarantee full credit for questions), they give you a pretty good indication as to whether or not you're on track.
Step10: C. Lists
Step11: To access a value in the list, put the index of the item you wish to access in brackets following the variable that stores the list. Lists in Python are zero-indexed, so the indicies for lst are 0, 1, 2, 3, 4, 5, and 6.
Step12: Slicing lists
Step13: Question 2
Step14: Lists can also be operated on with a few built-in analysis functions. These include min and max, among others. Lists can also be concatenated together. Find some examples below.
Step15: D. Numpy Arrays
Step16: This behavior differs from that of a list. See below what happens if you multiply a list.
Step17: Notice that instead of multiplying each of the elements by two, multiplying a list and a number returns that many copies of that list. This is the reason that we will sometimes use Numpy over lists. Other mathematical operations have interesting behaviors with lists that you should explore on your own.
Step18: The most important line in the above cell is the "for element in..." line. This statement sets the structure of our loop, instructing the machine to stop at every number in example_array, perform the indicated operations, and then move on. Once Python has stopped at every element in example_array, the loop is completed and the final line, which outputs new_list, is executed. It's important to note that "element" is an arbitrary variable name used to represent whichever index value the loop is currently operating on. We can change the variable name to whatever we want and achieve the same result, as long as we stay consistent. For example
Step19: For loops can also iterate over ranges of numerical values. If I wanted to alter example_array without copying it over to a new list, I would use a numerical iterator to access list indices rather than the elements themselves. This iterator, called i, would range from 0, the value of the first index, to 9, the value of the last. I can make sure of this by using the built-in range and len functions.
Step20: Other types of loops
Step21: Question 3
Step22: F. Functions!
Step23: Easy enough, right? Let's look at a function that takes two parameters, compares them somehow, and then returns a boolean value (True or False) depending on the comparison. The is_multiple function below takes as parameters an integer m and an integer n, checks if m is a multiple of n, and returns True if it is. Otherwise, it returns False.
Step24: Sidenote
Step25: Question 4
Step26: Part 2
Step27: Creating dataframes
Step28: Alternatively, we can store data in a dictionary instead of in lists. A dictionary keeps a mapping of keys to a set of values, and each key is unique. Using our top 10 movies example, we could create a dictionary that contains ratings a key, and movie titles as another key.
Step29: Now, we can use this dictionary to create a table with columns Rating and Movie
Step30: Notice how both ways return the same table! However, the list method created the table by essentially taking the lists and making up the rows of the table, while the dictionary method took the keys from the dictionary to make up the columns of the table. In this way, dataframes can be viewed as a collection of basic data structures, either through collecting rows or columns.
Step31: The pd.read_csv function expects a path to a .csv file as its input, and will return a data table created from the data contained in the csv.
Step32: Indexing Dataframes
Step33: Notice how the above cell returns an array of all the closure values in their original order.
Step34: Pandas columns have many of the same properties as numpy arrays. Keep in mind that pandas dataframes, as well as many other data structures, are zero-indexed, meaning indexes start at 0 and end at the number of elements minus one.
Step35: You can also use column indices instead of names.
Step36: Alternatively, you can also get rid of columns you dont need using .drop()
Step37: Finally, you can use square bracket notation to index rows by their indices with a single set of brackets. You must specify a range of values for which you want to index. For example, if I wanted the 20th to 30th rows of accounts
Step38: Filtering Data
Step39: The vot table is being indexed by the condition vot["height"] >= 170, which returns a table where only rows that have a "height" greater than $170$ is returned. We then index this table with the double bracket notation from the previous section to only get the closure and vot columns.
Step40: Many different conditions can be included to filter, and you can use & and | operators to connect them together. Make sure to include parantheses for each condition!
Step41: But wait! The table looks like it's sorted in increasing order. This is because sort_values defaults to ordering the column in ascending order. To correct this, add in the extra optional parameter
Step42: Now we can clearly see that the tallest person was 185.42 cm tall.
Step43: To find the maximum value, call max().
Step44: And to find the average value of a column, use mean().
Step45: Part 3
Step46: One of the advantages of pandas is its built-in plotting methods. We can simply call .plot() on a dataframe to plot columns against one another. All that we have to do is specify which column to plot on which axis. We include the extra variable kind='scatter' to override the default lineplot.
Step47: The base package for most plotting in Python is matplotlib. Below we will look at how to plot with it. First we will extract the columns that we are interested in, then plot them in a scatter plot. Note that plt is the common convention for matplotlib.pyplot.
Step48: Though matplotlib is sometimes considered an "ugly" plotting tool, it is powerful. It is highly customizable and is the foundation for most Python plotting libraries. Check out the documentation to get a sense of all of the things you can do with it, which extend far beyond scatter and line plots. An arguably more attractive package is seaborn, which we will go over in future notebooks.
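As a small illustrative aside (not part of the original lab), the same kind of scatter plot can be drawn with seaborn; the column names below are taken from the vot table used above.
import seaborn as sns
import matplotlib.pyplot as plt
sns.scatterplot(data=vot, x='pvot', y='kvot')
plt.show()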
|
4,768
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display, Latex, Markdown
mark_text = "_Ejemplo_ de **markdown** \nHola mundo"
display(Markdown(mark_text))
fila1 = "|columna 1|columna 2|"
filaalineacion = "|---:|:---:|"
fila2 = "|der|cen|"
display(Markdown(fila1+"\n"+filaalineacion+"\n"+fila2))
latexexp = "$$\\frac{mc^2}{2}$$"
display(Latex(latexexp))
import IPython.display as Display
dir(Display)
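# Illustrative addition (not in the original notebook): HTML can be rendered the same way.
html_snippet = "<b>Hola</b> <i>mundo</i>"
display(Display.HTML(html_snippet))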
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next we will use a code cell in which we store a markdown expression in a Python variable for later display.
Step2: The display method renders HTML code in the browser; that HTML is generated by another method, in this case the Markdown method, which converts markdown to HTML and stores it in an IPython object for that purpose.
Step3: In this example the variables were assigned statically, but you can write code that generates the content of each cell and joins its string representation with the strings needed to form the markdown pattern of a table.
Step4: Other supported display options are
|
4,769
|
<ASSISTANT_TASK:>
Python Code:
#numerical library
import numpy as np
#plot library
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
from pprint import pprint
x,y = np.indices([1024,1024])
%timeit (x**2+y**2)**0.5
%timeit 0.7531854654594905*(x+y)
epoch_of_training = 1000
learning_rate = 1e-8
gamma = 1
init_sq_err = np.sum(0.5*((gamma*(x+y) - (x**2+y**2)**0.5)**2))
init_sq_err
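# Note added for clarity: the update below comes from the squared-error objective
# E(gamma) = sum of 0.5*(gamma*(x+y) - sqrt(x**2 + y**2))**2 over the grid, whose
# derivative with respect to gamma is (gamma*(x+y) - sqrt(x**2 + y**2))*(x+y);
# the loop uses the mean of this quantity as the gradient estimate.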
for i in range(epoch_of_training):
gamma -= learning_rate * np.mean((gamma*(x+y)-(x**2+y**2)**0.5)*(x+y))
gamma
fin_sq_err = np.sum(0.5*((gamma*(x+y) - (x**2+y**2)**0.5)**2))
fin_sq_err
delta_sq_err = init_sq_err - fin_sq_err
delta_sq_err
Error = abs(gamma*(x+y) - (x**2+y**2)**0.5)
print(np.max(Error))
print(np.mean(Error))
print(np.min(Error))
fig = plt.figure(figsize=[12,12])
ax = fig.gca(projection='3d')
X = np.arange(1, 1024, 8)
Y = np.arange(1, 1024, 8)
X, Y = np.meshgrid(X, Y)
Z = np.sqrt(X**2 + Y**2)
F = abs(gamma*(X+Y)-Z)/(Z)
surf = ax.plot_surface(X, Y, F, rstride=2, cstride=2, cmap=cm.jet,linewidth=1)
ax.set_xlabel('X')
ax.set_xlim(-10, 1034)
ax.set_ylabel('Y')
ax.set_ylim(-10, 1034)
ax.set_zlabel('Z')
ax.set_zlim(0, 0.30)
ax.invert_yaxis()
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialization of the variables
Step2: Guessing the speed gain if this works
Step3: So if this approximation works we can estimate the norm roughly 7x faster
Step4: Initial Error
Step5: the partial derivative of the error
Step6: Results
Step7: Percentage Error Plot
|
4,770
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from theano import function
raise NotImplementedError("TODO: add any other imports you need")
def make_scalar():
Returns a new Theano scalar.
raise NotImplementedError("TODO: implement this function.")
def log(x):
Returns the logarithm of a Theano scalar x.
raise NotImplementedError("TODO: implement this function.")
def add(x, y):
Adds two theano scalars together and returns the result.
raise NotImplementedError("TODO: implement this function.")
# The following code will use your code and test it.
a = make_scalar()
b = make_scalar()
c = log(b)
d = add(a, c)
f = function([a, b], d)
a = np.cast[a.dtype](1.)
b = np.cast[b.dtype](2.)
actual = f(a, b)
expected = 1. + np.log(2.)
assert np.allclose(actual, expected)
print("SUCCESS!")
%load solutions/01_scalar_soln.py
import numpy as np
from theano import function
raise NotImplementedError("TODO: add any other imports you need")
def make_vector():
Returns a new Theano vector.
raise NotImplementedError("TODO: implement this function.")
def make_matrix():
Returns a new Theano matrix.
raise NotImplementedError("TODO: implement this function.")
def elemwise_mul(a, b):
a: A theano matrix
b: A theano matrix
Returns the elementwise product of a and b
raise NotImplementedError("TODO: implement this function.")
def matrix_vector_mul(a, b):
a: A theano matrix
b: A theano vector
Returns the matrix-vector product of a and b
raise NotImplementedError("TODO: implement this function.")
# The following code will use your code and test it.
a = make_vector()
b = make_vector()
c = elemwise_mul(a, b)
d = make_matrix()
e = matrix_vector_mul(d, c)
f = function([a, b, d], e)
rng = np.random.RandomState([1, 2, 3])
a_value = rng.randn(5).astype(a.dtype)
b_value = rng.rand(5).astype(b.dtype)
c_value = a_value * b_value
d_value = rng.randn(5, 5).astype(d.dtype)
expected = np.dot(d_value, c_value)
actual = f(a_value, b_value, d_value)
assert np.allclose(actual, expected)
print("SUCCESS!")
%load solutions/02_vector_mat_soln.py
import numpy as np
from theano import function
raise NotImplementedError("TODO: add any other imports you need")
def make_tensor(dim):
Returns a new Theano tensor with no broadcastable dimensions.
dim: the total number of dimensions of the tensor.
(You can use any dtype you like)
raise NotImplementedError("TODO: implement this function.")
def broadcasted_add(a, b):
a: a 3D theano tensor
b: a 4D theano tensor
Returns c, a 4D theano tensor, where
c[i, j, k, l] = a[l, k, i] + b[i, j, k, l]
for all i, j, k, l
raise NotImplementedError("TODO: implement this function.")
def partial_max(a):
a: a 4D theano tensor
Returns b, a theano matrix, where
b[i, j] = max_{k,l} a[i, k, l, j]
for all i, j
raise NotImplementedError("TODO: implement this function.")
# The following code use your code and test it.
a = make_tensor(3)
b = make_tensor(4)
c = broadcasted_add(a, b)
d = partial_max(c)
f = function([a, b], d)
rng = np.random.RandomState([1, 2, 3])
a_value = rng.randn(2, 2, 2).astype(a.dtype)
b_value = rng.rand(2, 2, 2, 2).astype(b.dtype)
c_value = np.transpose(a_value, (2, 1, 0))[:, None, :, :] + b_value
expected = c_value.max(axis=1).max(axis=1)
actual = f(a_value, b_value)
assert np.allclose(actual, expected), (actual, expected)
print("SUCCESS!")
%load solutions/03_tensor_soln.py
from theano import tensor as T
raise NotImplementedError("TODO: add any other imports you need")
def evaluate(x, y, expr, x_value, y_value):
x: A theano variable
y: A theano variable
expr: A theano expression involving x and y
x_value: A numpy value
y_value: A numpy value
Returns the value of expr when x_value is substituted for x
and y_value is substituted for y
raise NotImplementedError("TODO: implement this function.")
# The following code use your code and test it.
x = T.iscalar()
y = T.iscalar()
z = x + y
assert evaluate(x, y, z, 1, 2) == 3
print("SUCCESS!")
%load solutions/04_function_soln.py
import numpy as np
raise NotImplementedError("TODO: add any other imports you need")
def make_shared(shape):
Returns a theano shared variable containing a tensor of the specified
shape.
You can use any value you want.
raise NotImplementedError("TODO: implement the function")
def exchange_shared(a, b):
a: a theano shared variable
b: a theano shared variable
Uses get_value and set_value to swap the values stored in a and b
raise NotImplementedError("TODO: implement the function")
def make_exchange_func(a, b):
a: a theano shared variable
b: a theano shared variable
Returns f
where f is a theano function, that, when called, swaps the
values in a and b
f should not return anything
raise NotImplementedError("TODO: implement the function")
# The following code will use your code and test it.
a = make_shared((5, 4, 3))
assert a.get_value().shape == (5, 4, 3)
b = make_shared((5, 4, 3))
assert a.get_value().shape == (5, 4, 3)
a.set_value(np.zeros((5, 4, 3), dtype=a.dtype))
b.set_value(np.ones((5, 4, 3), dtype=b.dtype))
exchange_shared(a, b)
assert np.all(a.get_value() == 1.)
assert np.all(b.get_value() == 0.)
f = make_exchange_func(a, b)
rval = f()
assert isinstance(rval, list)
assert len(rval) == 0
assert np.all(a.get_value() == 0.)
assert np.all(b.get_value() == 1.)
print("SUCCESS!")
%load solutions/05_shared_soln.py
from theano import tensor as T
def grad_sum(x, y, z):
x: A theano variable
y: A theano variable
z: A theano expression involving x and y
Returns dz / dx + dz / dy
raise NotImplementedError("TODO: implement this function.")
# The following code will use your code and test it.
x = T.scalar()
y = T.scalar()
z = x + y
s = grad_sum(x, y, z)
assert s.eval({x: 0, y: 0}) == 2
print("SUCCESS!")
%load solutions/06_grad_soln.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Theano exercises
Step4: Solution
Step9: Exercise 2
Step10: Solution
Step14: Exercise 3
Step15: Solution
Step17: Exercise 4
Step18: Solution
Step22: Exercise 5
Step23: Solution
Step25: Exercise 6
Step26: Solution
|
4,771
|
<ASSISTANT_TASK:>
Python Code:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
from glob import glob
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
from keras.utils import np_utils
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input/flowers/flowers"))
# Any results you write to the current directory are saved as output.
# Make a parent directory `data` and three sub directories `train`, `valid` and 'test'
%rm -rf data # Remove if already present
%mkdir -p data/train/daisy
%mkdir -p data/train/tulip
%mkdir -p data/train/sunflower
%mkdir -p data/train/rose
%mkdir -p data/train/dandelion
%mkdir -p data/valid/daisy
%mkdir -p data/valid/tulip
%mkdir -p data/valid/sunflower
%mkdir -p data/valid/rose
%mkdir -p data/valid/dandelion
%mkdir -p data/test/daisy
%mkdir -p data/test/tulip
%mkdir -p data/test/sunflower
%mkdir -p data/test/rose
%mkdir -p data/test/dandelion
%ls data/train
%ls data/valid
%ls data/test
base_dir = "../input/flowers/flowers"
categories = os.listdir(base_dir)
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
from shutil import copyfile
plt.rcParams["figure.figsize"] = (20,3)
def train_valid_test(files):
    """Split the files into training, validation and test sets with 60%, 20%
    and 20% of the data respectively."""
    train_files = files[:int(len(files)*0.6)]
    valid_files = files[int(len(files)*0.6):int(len(files)*0.8)]
    test_files = files[int(len(files)*0.8):]
    return train_files, valid_files, test_files
def copy_files(files, src, dest):
    """Copy every file from src to dest."""
    for file in files:
        copyfile("{}/{}".format(src, file), "{}/{}".format(dest, file))
def plot_images(category, images):
    """Plot the given (up to five) images from a category."""
    for i in range(len(images)):
        plt.subplot(1, 5, i+1)
        plt.title(category)
        image = mpimg.imread("{}/{}/{}".format(base_dir, category, images[i]))
        plt.imshow(image)
    plt.show()
total_images = []
for category in categories:
images = os.listdir("{}/{}".format(base_dir, category))
random.shuffle(images)
filtered_images = [image for image in images if image not in ['flickr.py', 'flickr.pyc', 'run_me.py']]
total_images.append(len(filtered_images))
train_images, valid_images, test_images = train_valid_test(filtered_images)
copy_files(train_images, "{}/{}".format(base_dir, category), "./data/train/{}".format(category))
copy_files(valid_images, "{}/{}".format(base_dir, category), "./data/valid/{}".format(category))
copy_files(test_images, "{}/{}".format(base_dir, category), "./data/test/{}".format(category))
plot_images(category, images[:5])
print("Total images: {}".format(np.sum(total_images)))
for i in range(len(categories)):
print("{}: {}".format(categories[i], total_images[i]))
y_pos = np.arange(len(categories))
plt.bar(y_pos, total_images, width=0.2,color='b',align='center')
plt.xticks(y_pos, categories)
plt.ylabel("Image count")
plt.title("Image count in different categories")
plt.show()
# define function to load train, valid and test datasets
def load_dataset(path):
data = load_files(path)
flower_files = np.array(data['filenames'])
print(data['target_names'])
flower_targets = np_utils.to_categorical(np.array(data['target']), 5)
return flower_files, flower_targets
# load train, test, and validation datasets
train_files, train_targets = load_dataset('data/train')
valid_files, valid_targets = load_dataset('data/valid')
test_files, test_targets = load_dataset('data/test')
print('There are %d total flower categories.' % len(categories))
print('There are %s total flower images.\n' % len(np.hstack([train_files, valid_files, test_files])))
print('There are %d training flower images.' % len(train_files))
print('There are %d validation flower images.' % len(valid_files))
print('There are %d test flower images.' % len(test_files))
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
# loads RGB image as PIL.Image.Image type
img = image.load_img(img_path, target_size=(224, 224))
# convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
x = image.img_to_array(img)
# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
return np.expand_dims(x, axis=0)
def paths_to_tensor(img_paths):
list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]
return np.vstack(list_of_tensors)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
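# LOAD_TRUNCATED_IMAGES makes PIL tolerate partially written JPEGs, which would
# otherwise raise an error while the tensors below are being built.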
# pre-process the data for Keras
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
simple_model = Sequential()
print(train_tensors.shape)
### Define the architecture of the simple model.
simple_model.add(Conv2D(filters=16, kernel_size=2, strides=1, activation='relu', input_shape=(224,224,3)))
simple_model.add(GlobalAveragePooling2D())
simple_model.add(Dense(5, activation='softmax'))
simple_model.summary()
simple_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Create a `saved_models` directory for saving best model
%mkdir -p saved_models
from keras.callbacks import ModelCheckpoint
### number of epochs
epochs = 50
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.simple.hdf5',
verbose=1, save_best_only=True)
simple_model.fit(train_tensors, train_targets,
validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
simple_model.load_weights('saved_models/weights.best.simple.hdf5')
# get index of predicted flower category for each image in test set
flower_predictions = [np.argmax(simple_model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy
test_accuracy = 100*np.sum(np.array(flower_predictions)==np.argmax(test_targets, axis=1))/len(flower_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
model = Sequential()
print(train_tensors.shape)
### Define architecture.
model.add(Conv2D(filters=16, kernel_size=2, strides=1, activation='relu', input_shape=(224,224,3)))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(filters=32, kernel_size=2, strides=1, activation='relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(filters=64, kernel_size=2, strides=1, activation='relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(GlobalAveragePooling2D())
model.add(Dense(5, activation='softmax'))
model.summary()
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
from keras.callbacks import ModelCheckpoint
### number of epochs
epochs = 50
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
# get index of predicted flower category for each image in test set
flower_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy
test_accuracy = 100*np.sum(np.array(flower_predictions)==np.argmax(test_targets, axis=1))/len(flower_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.models import Model
inception_resnet = InceptionResNetV2(weights="imagenet",include_top=False, input_shape=(224,224,3))
for layer in inception_resnet.layers[:5]:
layer.trainable = False
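# Note: only the first five layers of the base are frozen above. A common
# alternative is to freeze the whole base, e.g.
# for layer in inception_resnet.layers:
#     layer.trainable = False
# and optionally unfreeze it later for fine-tuning.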
output_model = inception_resnet.output
output_model = Flatten()(output_model)
output_model = Dense(200, activation='relu')(output_model)
output_model = Dropout(0.5)(output_model)
output_model = Dense(200, activation='relu')(output_model)
output_model = Dense(5, activation='softmax')(output_model)
model = Model(inputs=inception_resnet.input, outputs=output_model)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
from keras.callbacks import ModelCheckpoint
### number of epochs
epochs = 50
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.inception_resnetv2.hdf5',
verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
### load best weights
model.load_weights('saved_models/weights.best.inception_resnetv2.hdf5')
# get index of predicted flower category for each image in test set
flower_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy
test_accuracy = 100*np.sum(np.array(flower_predictions)==np.argmax(test_targets, axis=1))/len(flower_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
for i in range(5):
predicted = np.argmax(model.predict(np.expand_dims(test_tensors[i], axis=0)))
actual = np.argmax(test_targets[i])
print("Predicted: {}, Actual: {}, Name: {}".format(predicted, actual, test_files[i].split("/")[2]))
image = mpimg.imread(test_files[i])
plt.imshow(image)
plt.show()
%rm -rf data
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reorganize the data
Step5: Find all the categories of the flowers
Step6: Statistics of flowers
Step7: Observations
Step8: Data Transformation
Step9: Create a 4D tensor
Step10: The paths_to_tensor function applies path_to_tensor to every image and stacks the results into a single 4D tensor.
Step11: Pre-process the Data
Step12: <a id="step2"></a>
Step 2
Step13: Making Predictions with the simple model
Step14: Benchmark model's performance
Step 3
Step15: Load best weight of the model
Step16: Get the accuracy of the model
Step17: <a id="step4"></a>
Step 4
Step18: Load the best weight of the model
Step19: Get the accuracy on test set
Step20: Delete created directory and files. It's necessary to have only few files otherwise Kaggle won't allow to commit a kernel.
|
4,772
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cas', 'sandbox-2', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
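# Illustrative only (assumes repeated calls append to list-valued properties):
# DOC.set_value("water")
# DOC.set_value("energy")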
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
4,773
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
from pyquickhelper.helpgen import NbImage
NbImage("images/2048.png", width=200)
import numpy
def create_game():
return numpy.zeros((4,4), dtype=int)
create_game()
import random
def gameover1(game):
    arr = game.ravel()
    arr = arr[arr == 0]
    return len(arr) == 0
def gameover(game):
return numpy.ma.masked_not_equal(game, 0).count() == 0
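# Sanity check of the masking logic (illustrative): masked_not_equal hides every
# non-zero cell, so count() returns the number of empty cells, e.g.
# numpy.ma.masked_not_equal(numpy.array([[2, 0], [4, 8]]), 0).count() == 1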
def joue(game):
if gameover(game):
raise Exception("Game Over\n" + str(game))
else:
while True:
i = random.randint(0, game.shape[0]-1)
j = random.randint(0, game.shape[1]-1)
if game[i,j] == 0:
n = random.randint(0,3)
game[i,j] = 4 if n == 0 else 2
break
return game
game = create_game()
joue(game)
joue(game)
game = create_game()
iter = 0
while True:
try:
joue(game)
except Exception as e:
print("itération", iter)
print(game)
break
iter += 1
def process_line(line):
    res = []
    for n in line:
        if n == 0:
            # A zero: skip it.
            continue
        if len(res) == 0:
            # First number: append it to the result.
            res.append(n)
        else:
            prev = res[-1]
            if prev == n:
                # Same number as the previous one: merge them.
                res[-1] = 2 * n
            else:
                # Otherwise just append it.
                res.append(n)
    while len(res) < len(line):
        res.append(0)
    return res
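# Note: the merge above is greedy, so process_line([2, 2, 4]) yields [8, 0, 0]
# (the freshly merged 4 merges again with the next 4); official 2048 rules
# would give [4, 4, 0].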
process_line([2,2,4,0])
process_line([0,2,0,0])
def update_game(game, direction):
if direction == 0:
lines = [process_line(game[i,:]) for i in range(game.shape[0])]
game = numpy.array(lines)
elif direction == 1:
lines = [process_line(game[:,i]) for i in range(game.shape[1])]
game = numpy.array(lines).T
elif direction == 2:
lines = [list(reversed(process_line(game[i,::-1]))) for i in range(game.shape[0])]
game = numpy.array(lines)
elif direction == 3:
lines = [list(reversed(process_line(game[::-1,i]))) for i in range(game.shape[1])]
game = numpy.array(lines).T
return game
game = create_game()
for i in range(0,5):
game = joue(game)
print('-------------')
print(game)
direction = i % 4
game = update_game(game, direction)
print("direction=",direction)
print(game)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: The second function draws a random number and puts it in a randomly chosen empty cell, if any remain. If none remain, the game is over. The ravel function flattens the matrix into a 1D array so we can check for zero entries; that is the gameover1 function. Alternatively, masked_not_equal can be used, as in the gameover function.
Step3: We play a second move.
Step4: We check that after 16 moves the function raises an exception.
Step5: To play a move, the numbers have to slide and merge. The logic is the same for every row and column, so we write a single function for it.
Step6: We write the update function handling all 4 directions.
Step7: We test it over 5 moves.
|
4,774
|
<ASSISTANT_TASK:>
Python Code:
from petal_helper import *
import tensorflow as tf
# Detect TPU, return appropriate distribution strategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
ds_train = get_training_dataset()
ds_valid = get_validation_dataset()
ds_test = get_test_dataset()
print("Training:", ds_train)
print ("Validation:", ds_valid)
print("Test:", ds_test)
with strategy.scope():
pretrained_model = tf.keras.applications.VGG16(
weights='imagenet',
include_top=False,
input_shape=[*IMAGE_SIZE, 3]
)
pretrained_model.trainable = False
model = tf.keras.Sequential([
# To a base pretrained on ImageNet to extract features from images...
pretrained_model,
# ... attach a new head to act as a classifier.
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(CLASSES), activation='softmax')
])
model.compile(
optimizer='adam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
)
model.summary()
# Define the batch size. This will be 16 with TPU off and 128 (=16*8) with TPU on
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
# Define training epochs
EPOCHS = 12
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
history = model.fit(
ds_train,
validation_data=ds_valid,
epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
)
display_training_curves(
history.history['loss'],
history.history['val_loss'],
'loss',
211,
)
display_training_curves(
history.history['sparse_categorical_accuracy'],
history.history['val_sparse_categorical_accuracy'],
'accuracy',
212,
)
test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
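# Illustrative sanity check (assumes CLASSES from petal_helper is a list of
# label strings indexed by class id):
# print([CLASSES[p] for p in predictions[:5]])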
print('Generating submission.csv file...')
# Get image ids from test set and convert to unicode
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
# Write the submission file
np.savetxt(
'submission.csv',
np.rec.fromarrays([test_ids, predictions]),
fmt=['%s', '%d'],
delimiter=',',
header='id,label',
comments='',
)
# Look at the first few predictions
!head submission.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Distribution Strategy
Step2: TensorFlow will distribute training among the eight TPU cores by creating eight different replicas of your model.
Step3: These are tf.data.Dataset objects. You can think about a dataset in TensorFlow as a stream of data records. Check out this guide for more on working with the tf.data API.
Step4: Training
Step5: This next cell shows how the loss and metrics progressed during training. Thankfully, it converges!
Step6: Predictions
Step7: We'll generate a file submission.csv. This file is what you'll submit to get your score on the leaderboard.
|
4,775
|
<ASSISTANT_TASK:>
Python Code:
from math import sin, exp
def func(x):
return sin(x / 5.) * exp(x / 10.) + 5. * exp(-x / 2.)
import numpy as np
from scipy import linalg
arrCoordinates = np.arange(1., 15.1, 0.1)
arrFunction = np.array([func(coordinate) for coordinate in arrCoordinates])
# first-degree polynomial
arrCoord1 = np.array([1, 15])
N = 2
arrA1 = np.empty((0, N))
for i in range(N):
    arrA1Line = list()
    for j in range(N):
        arrA1Line.append(arrCoord1[i] ** j)
    arrA1 = np.append(arrA1, np.array([arrA1Line]), axis=0)
arrB1 = np.array([func(coordinate) for coordinate in arrCoord1])
print(arrCoord1)
print(arrA1)
print(arrB1)
arrX1 = linalg.solve(arrA1, arrB1)
print(arrX1)
def func1(x): return arrX1[0] + arrX1[1] * x
arrFunc1 = np.array([func1(coordinate) for coordinate in arrCoordinates])
%matplotlib inline
import matplotlib.pylab as plt
plt.plot(arrCoordinates, arrFunction, arrCoordinates, arrFunc1)
plt.show()
# second-degree polynomial
arrCoord2 = np.array([1, 8, 15])
N = 3
arrA2 = np.empty((0, N))
for i in range(N):
    arrA2Line = list()
    for j in range(N):
        arrA2Line.append(arrCoord2[i] ** j)
    arrA2 = np.append(arrA2, np.array([arrA2Line]), axis=0)
arrB2 = np.array([func(coordinate) for coordinate in arrCoord2])
print(arrCoord2)
print(arrA2)
print(arrB2)
arrX2 = linalg.solve(arrA2, arrB2)
print(arrX2)
def func2(x): return arrX2[0] + arrX2[1] * x + arrX2[2] * (x ** 2)
arrFunc2 = np.array([func2(coordinate) for coordinate in arrCoordinates])
plt.plot(arrCoordinates, arrFunction, arrCoordinates, arrFunc1, arrCoordinates, arrFunc2)
plt.show()
# third-degree polynomial
arrCoord3 = np.array([1, 4, 10, 15])
N = 4
arrA3 = np.empty((0, N))
for i in range(N):
    arrA3Line = list()
    for j in range(N):
        arrA3Line.append(arrCoord3[i] ** j)
    arrA3 = np.append(arrA3, np.array([arrA3Line]), axis=0)
arrB3 = np.array([func(coordinate) for coordinate in arrCoord3])
print(arrCoord3)
print(arrA3)
print(arrB3)
arrX3 = linalg.solve(arrA3, arrB3)
print(arrX3)
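# The double loop above builds a Vandermonde matrix; as an equivalent one-liner
# (sketch, same column ordering x**0 .. x**(N-1)):
# arrA3_alt = np.vander(arrCoord3, N, increasing=True)
# assert np.allclose(arrA3, arrA3_alt)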
def func3(x): return arrX3[0] + arrX3[1] * x + arrX3[2] * (x ** 2) + arrX3[3] * (x ** 3)
arrFunc3 = np.array([func3(coordinate) for coordinate in arrCoordinates])
plt.plot(arrCoordinates, arrFunction, arrCoordinates, arrFunc1, arrCoordinates, arrFunc2, arrCoordinates, arrFunc3)
plt.show()
with open('answer2.txt', 'w') as fileAnswer:
for item in arrX3:
fileAnswer.write(str(item) + ' ')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Set up the system of linear equations for a first-degree polynomial that must match the function at points 1 and 15.
Step2: 2. A second-degree polynomial at points 1, 8, 15.
Step3: 3. A third-degree polynomial at points 1, 4, 10, 15.
|
4,776
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cmcc', 'sandbox-3', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
4,777
|
<ASSISTANT_TASK:>
Python Code:
# DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
# DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
# TODO: Implement Function
counter = Counter(text)
sorted_vocab = sorted(counter, key=counter.get, reverse=True)
vocab_to_int = {word: i for i, word in enumerate(sorted_vocab)}
int_to_vocab = {v: k for k, v in vocab_to_int.items()}
return (vocab_to_int, int_to_vocab)
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
# TODO: Implement Function
return {
'.': '||Period||',
',': '||Comma||',
'"': '||QuotationMark||',
';': '||Semicolon||',
'!': '||ExclamationMark||',
'?': '||QuestionMark||',
'(': '||LeftParentheses||',
')': '||RightParentheses||',
'--': '||Dash||',
'\n': '||Return||'
}
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
# DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
# DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
# DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
# TODO: Implement Function
inputs = tf.placeholder(tf.int32, shape=[None, None], name="input")
targets = tf.placeholder(tf.int32, shape=[None, None], name="targets")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
return (inputs, targets, learning_rate)
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
# TODO: Implement Function
    # Using 2 layers takes more epochs to train but gives a similar training
    # loss to 1 layer, so just 1 layer is used here.
    # 1 layer  - 150 epochs - train_loss: 0.036
    # 2 layers - 400 epochs - train_loss: 0.033
n_layers = 1
lstm_layer = tf.contrib.rnn.BasicLSTMCell(rnn_size)
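    # Note: for n_layers > 1 it is safer to build a fresh cell per layer,
    # e.g. [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(n_layers)],
    # since reusing one cell object makes the stacked layers share weights.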
cell = tf.contrib.rnn.MultiRNNCell([lstm_layer] * n_layers)
initialized_state = cell.zero_state(batch_size, tf.float32)
initialized_state = tf.identity(initialized_state, name='initial_state')
return (cell, initialized_state)
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
# TODO: Implement Function
embedded_input = tf.Variable(tf.truncated_normal((vocab_size, embed_dim), stddev=0.01))
return tf.nn.embedding_lookup(embedded_input, input_data)
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
# TODO: Implement Function
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name="final_state")
return (outputs, final_state)
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
# TODO: Implement Function
    # Apply embedding (of size embed_dim) to the input data via get_embed.
    embeddings = get_embed(input_data, vocab_size, embed_dim)
# Build RNN using `cell` and embeddings by the `build_rnn` function.
outputs, final_state = build_rnn(cell, embeddings)
    # Apply a fully connected output layer with `vocab_size` outputs,
    # truncated-normal weights and zero-initialized biases.
logits = tf.contrib.layers.fully_connected(outputs,
vocab_size,
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
biases_initializer=tf.zeros_initializer(),
activation_fn=None)
return (logits, final_state)
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array
    """
# TODO: Implement Function
batch_count = int(len(int_text) / (batch_size * seq_length))
batch_word_length = batch_count * batch_size * seq_length
x = np.array(int_text[:batch_word_length])
x_batches = np.split(x.reshape(batch_size, -1), batch_count, 1)
y = np.array(int_text[1:batch_word_length + 1])
y_batches = np.split(y.reshape(batch_size, -1), batch_count, 1)
# Last target value of last batch is the first input value of first batch
y_batches[-1][-1][-1] = x_batches[0][0][0]
return np.array(list(zip(x_batches, y_batches)))
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
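# Quick sanity check (a sketch with toy data, not part of the project tests):
# 13 word ids with batch_size=2 and seq_length=3 yield 2 batches, each holding
# a (2, 3) input array and a (2, 3) target array.
example_batches = get_batches(list(range(13)), 2, 3)
print(example_batches.shape)  # expected: (2, 2, 2, 3)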
# Number of Epochs
num_epochs = 201 # Just to make the output of epoch 100 visible
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 8
# Sequence Length
seq_length = 64
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 100
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
# DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
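    # Design note: clipping each gradient element to [-1, 1] guards against
    # the exploding gradients that backpropagation through long RNN sequences
    # can produce.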
# DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
# DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
# DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
# TODO: Implement Function
input_tensor = loaded_graph.get_tensor_by_name('input:0')
initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilities of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
# TODO: Implement Function
word_id = np.random.choice(len(probabilities), p=probabilities)
return int_to_vocab[word_id]
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
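# Optional variant (a sketch, not part of the project spec): a temperature t
# could reshape `probabilities` before sampling to control randomness, e.g.
# p = probabilities ** (1.0 / t); p /= p.sum(); np.random.choice(len(p), p=p)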
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
4,778
|
<ASSISTANT_TASK:>
Python Code:
# Author: Alan Leggitt <alan.leggitt@ucsf.edu>
#
# License: BSD-3-Clause
import os.path as op
import mne
from mne import setup_source_space, setup_volume_source_space
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'sample'
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
# setup a cortical surface source space and extract left hemisphere
surf = setup_source_space(subject, subjects_dir=subjects_dir, add_dist=False)
lh_surf = surf[0]
# setup a volume source space of the left cerebellum cortex
volume_label = 'Left-Cerebellum-Cortex'
sphere = (0, 0, 0, 0.12)
lh_cereb = setup_volume_source_space(
subject, mri=aseg_fname, sphere=sphere, volume_label=volume_label,
subjects_dir=subjects_dir, sphere_units='m')
# Combine the source spaces
src = surf + lh_cereb
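# The combined source space could be saved for reuse (a sketch; the file name
# is illustrative but must end in -src.fif):
# mne.write_source_spaces('sample-surf-cereb-src.fif', src, overwrite=True)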
fig = mne.viz.plot_alignment(subject=subject, subjects_dir=subjects_dir,
surfaces='white', coord_frame='mri',
src=src)
mne.viz.set_3d_view(fig, azimuth=180, elevation=90,
distance=0.30, focalpoint=(-0.03, -0.01, 0.03))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup the source spaces
Step2: Plot the positions of each source space
|
4,779
|
<ASSISTANT_TASK:>
Python Code:
# Author: Annalisa Pascarella <a.pascarella@iac.cnr.it>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
from nilearn import plotting
import mne
from mne.minimum_norm import make_inverse_operator, apply_inverse
# Set dir
data_path = mne.datasets.sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_evoked = data_dir + '/sample_audvis-ave.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'
fname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'
# List the substructures we are interested in. We select only the
# substructures we want to include in the source space.
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Get a surface-based source space, here with few source points for speed
# in this demonstration, in general you should use oct6 spacing!
src = mne.setup_source_space(subject, spacing='oct5',
add_dist=False, subjects_dir=subjects_dir)
# Now we create a mixed src space by adding the volume regions specified in the
# list labels_vol. First, read the aseg file and the source space bounds
# using the inner skull surface (here using 10mm spacing to save time,
# we recommend something smaller like 5.0 in actual analyses):
vol_src = mne.setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
volume_label=labels_vol, subjects_dir=subjects_dir,
add_interpolator=False, # just for speed, usually this should be True
verbose=True)
# Generate the mixed source space
src += vol_src
# Visualize the source space.
src.plot(subjects_dir=subjects_dir)
n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True)
plotting.plot_img(nii_fname, cmap='nipy_spectral')
# Compute the fwd matrix
fwd = mne.make_forward_solution(
fname_evoked, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False, n_jobs=1)
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
src_fwd = fwd['src']
n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd)))
print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n))
# Load data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname_evoked, condition=condition,
baseline=(None, 0))
noise_cov = mne.read_cov(fname_cov)
# Compute inverse solution and for each epoch
snr = 3.0 # use smaller SNR for raw data
inv_method = 'dSPM' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
# Compute inverse operator
inverse_operator = make_inverse_operator(evoked.info, fwd, noise_cov,
depth=None, fixed=False)
stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori=None)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(
subject, parc=parc, subjects_dir=subjects_dir)
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(
[stc], labels_parc, src, mode='mean', allow_empty=True)
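# Note: with the 34 'aparc' labels per hemisphere listed first, the volume
# labels follow in the order given above, so index 71 below picks out the
# 'Brain-Stem' time course.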
# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stc.times, label_ts[0][71, :].T, 'r',
label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
axes.legend()
mne.viz.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up our source space.
Step2: We could write the mixed source space with
Step3: Average the source estimates within each label of the cortical parcellation
|
4,780
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd

from lifelines import CoxPHFitter
from lifelines.datasets import load_rossi

rossi = load_rossi()
cph = CoxPHFitter()
cph.fit(rossi, 'week', 'arrest')
cph.print_summary(model="untransformed variables", decimals=3)
cph.check_assumptions(rossi, p_value_threshold=0.05, show_plots=True)
from lifelines.statistics import proportional_hazard_test
results = proportional_hazard_test(cph, rossi, time_transform='rank')
results.print_summary(decimals=3, model="untransformed variables")
cph.fit(rossi, 'week', 'arrest', strata=['wexp'])
cph.print_summary(model="wexp in strata")
cph.check_assumptions(rossi, show_plots=True)
cph.fit(rossi, 'week', 'arrest', strata=['wexp'], formula="bs(age, df=4, lower_bound=10, upper_bound=50) + fin + race + mar + paro + prio")
cph.print_summary(model="spline_model"); print()
cph.check_assumptions(rossi, show_plots=True, p_value_threshold=0.05)
rossi_strata_age = rossi.copy()
rossi_strata_age['age_strata'] = pd.cut(rossi_strata_age['age'], np.arange(0, 80, 3))
rossi_strata_age[['age', 'age_strata']].head()
# drop the original, redundant, age column
rossi_strata_age = rossi_strata_age.drop('age', axis=1)
cph.fit(rossi_strata_age, 'week', 'arrest', strata=['age_strata', 'wexp'])
cph.print_summary(3, model="stratified age and wexp")
cph.plot()
cph.check_assumptions(rossi_strata_age)
from lifelines.utils import to_episodic_format
# the time_gaps parameter specifies how large or small you want the periods to be.
rossi_long = to_episodic_format(rossi, duration_col='week', event_col='arrest', time_gaps=1.)
rossi_long.head(25)
rossi_long['time*age'] = rossi_long['age'] * rossi_long['stop']
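# Design choice: interacting age with `stop` lets age's log-hazard coefficient
# vary linearly in time, directly modelling the time-varying effect flagged by
# check_assumptions above.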
from lifelines import CoxTimeVaryingFitter
ctv = CoxTimeVaryingFitter()
ctv.fit(rossi_long,
id_col='id',
event_col='arrest',
start_col='start',
stop_col='stop',
strata=['wexp'])
ctv.print_summary(3, model="age * time interaction")
ctv.plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Checking assumptions with check_assumptions
Step2: Alternatively, you can use the proportional hazard test outside of check_assumptions
Step3: Stratification
Step4: Since age is still violating the proportional hazard assumption, we need to model it better. From the residual plots above, we can see the effect of age starting to become negative over time. This will be relevant later. Below, we present three options to handle age.
Step5: We see we may still have some potential violation, but it's a heck of a lot less. Also, interestingly, when we include these non-linear terms for age, the wexp proportionality violation disappears. It is not uncommon that changing the functional form of one variable affects others' proportionality tests, usually positively. So, we could remove the strata=['wexp'] if we wished.
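As a quick sketch of that last point (illustrative only, reusing the names above — not part of the original analysis), one could move wexp back into the formula and re-check:
cph.fit(rossi, 'week', 'arrest', formula="bs(age, df=4, lower_bound=10, upper_bound=50) + fin + race + mar + paro + prio + wexp")
cph.check_assumptions(rossi, p_value_threshold=0.05)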
Step6: Introduce time-varying covariates
Step7: Each subject is given a new id (but can be specified as well if already provided in the dataframe). This id is used to track subjects over time. Notice the arrest col is 0 for all periods prior to their (possible) event as well.
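A small check to make the long-format bookkeeping concrete (a sketch; subject id 1 is assumed to exist):
rossi_long[rossi_long['id'] == 1][['id', 'start', 'stop', 'arrest']].head(10)
This shows start/stop advancing one week at a time while arrest stays 0 until the subject's final interval.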
|
4,781
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import edward as ed
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import os
import tensorflow as tf
from edward.models import Uniform
from tensorflow.contrib import slim
from tensorflow.examples.tutorials.mnist import input_data
import pandas as pd
# load data set
# put the dimensions of preprocessed images
data_width = 150
data_height = 150
data_dim = data_width * data_height
data_file_path = 'ganesh_preproecessed_images.csv'
X = np.array(pd.read_csv(data_file_path, header=None))
def plot(samples):
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(data_width, data_height), cmap='Greys_r')
return fig
ed.set_seed(42)
M = 300 # batch size during training
d = 100 # latent dimension
IMG_DIR = "img"
if not os.path.exists(IMG_DIR):
os.makedirs(IMG_DIR)
x_ph = tf.placeholder(tf.float32, [M, data_dim])
def generative_network(eps):
h1 = slim.fully_connected(eps, 128, activation_fn=tf.nn.relu)
h2 = slim.fully_connected(h1, 128, activation_fn=tf.nn.relu)
x = slim.fully_connected(h2, data_dim, activation_fn=tf.sigmoid)
return x
with tf.variable_scope("Gen"):
eps = Uniform(tf.zeros([M, d]) - 1.0, tf.ones([M, d]))
x = generative_network(eps)
def discriminative_network(x):
Outputs probability in logits.
h1 = slim.fully_connected(x, 128, activation_fn=tf.nn.relu)
h2 = slim.fully_connected(h1, 128, activation_fn=tf.nn.relu)
    logit = slim.fully_connected(h2, 1, activation_fn=None)
return logit
optimizer = tf.train.AdamOptimizer()
optimizer_d = tf.train.AdamOptimizer()
inference = ed.GANInference(
data={x: x_ph}, discriminator=discriminative_network)
inference.initialize(
optimizer=optimizer, optimizer_d=optimizer_d,
n_iter=20000, n_print=1000)
sess = ed.get_session()
tf.global_variables_initializer().run()
idx = np.random.randint(M, size=16)
i = 0
for t in range(inference.n_iter):
if t % inference.n_print == 0:
samples = sess.run(x)
samples = samples[idx, ]
fig = plot(samples)
plt.savefig(os.path.join(IMG_DIR, '{}.png').format(
str(i).zfill(3)), bbox_inches='tight')
plt.close(fig)
i += 1
# x_batch, _ = mnist.train.next_batch(M)
x_batch = X[np.random.choice(np.arange(X.shape[0]), size=M)]
info_dict = inference.update(feed_dict={x_ph: x_batch})
inference.print_progress(info_dict)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Model
Step4: Inference
Step5: Let $p^*(\mathbf{x})$ represent the true data distribution.
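For reference (the standard GAN formulation, not spelled out in the original), the two networks play the minimax game $\min_G \max_D \; \mathbb{E}_{\mathbf{x} \sim p^*(\mathbf{x})}[\log D(\mathbf{x})] + \mathbb{E}_{\boldsymbol{\epsilon}}[\log(1 - D(G(\boldsymbol{\epsilon})))]$, which ed.GANInference optimizes with the two Adam optimizers below.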
Step6: We'll use ADAM as optimizers for both the generator and discriminator.
Step7: We now form the main loop which trains the GAN. At each iteration, it draws a random minibatch of images, runs one update of the discriminator and generator via inference.update, and every n_print iterations saves a grid of generated samples.
|
4,782
|
<ASSISTANT_TASK:>
Python Code:
PROJECT_DIR = "../../"
use_toy_data = False
LOG_DIR = 'logs' # Tensorboard log directory
if use_toy_data:
batch_size = 8
embedding_dim = 5
cell_size = 32
max_len = 6
else:
batch_size = 64
embedding_dim = 20
cell_size = 128
max_len = 33
use_attention = True
use_bidirectional_encoder = True
is_time_major = True
if use_toy_data:
input_fn = 'toy_input.txt'
with open(input_fn, 'w') as f:
f.write('a b c\td e f d e f\n')
f.write('d e f\ta b c a b c\n')
else:
DATA_DIR = '../../data/'
input_fn = 'instrumental.full.train'
input_fn = os.path.join(DATA_DIR, input_fn)
if not os.path.exists(input_fn):
    import urllib.request
u = urllib.request.URLopener()
u.retrieve(
"http://sandbox.mokk.bme.hu/~judit/resources/instrumental.full.train", input_fn)
class Dataset(object):
PAD = 0
SOS = 1
EOS = 2
UNK = 3
#src_vocab = ['PAD', 'UNK']
constants = ['PAD', 'SOS', 'EOS', 'UNK']
hu_alphabet = list("aábcdeéfghiíjklmnoóöőpqrstuúüűvwxyz-+._")
def __init__(self, fn, config, src_alphabet=None, tgt_alphabet=None):
self.config = config
self.create_tables(src_alphabet, tgt_alphabet)
self.load_and_preproc_dataset(fn)
def create_tables(self, src_alphabet, tgt_alphabet):
if src_alphabet is None:
self.src_vocab = Dataset.constants + Dataset.hu_alphabet
else:
            self.src_vocab = Dataset.constants + src_alphabet
self.src_table = lookup_ops.index_table_from_tensor(
tf.constant(self.src_vocab), default_value=Dataset.UNK
)
if self.config.share_vocab:
self.tgt_vocab = self.src_vocab
self.tgt_table = self.src_table
else:
if tgt_alphabet is None:
self.tgt_vocab = Dataset.constants + Dataset.hu_alphabet
else:
                self.tgt_vocab = Dataset.constants + tgt_alphabet
self.tgt_table = lookup_ops.index_table_from_tensor(
tf.constant(self.tgt_vocab), default_value=Dataset.UNK
)
self.src_vocab_size = len(self.src_vocab)
self.tgt_vocab_size = len(self.tgt_vocab)
def load_and_preproc_dataset(self, fn):
dataset = tf.contrib.data.TextLineDataset(fn)
dataset = dataset.repeat()
dataset = dataset.map(lambda s: tf.string_split([s], delimiter='\t').values)
src = dataset.map(lambda s: s[0])
tgt = dataset.map(lambda s: s[1])
src = src.map(lambda s: tf.string_split([s], delimiter=' ').values)
src = src.map(lambda s: s[:self.config.src_maxlen])
tgt = tgt.map(lambda s: tf.string_split([s], delimiter=' ').values)
tgt = tgt.map(lambda s: s[:self.config.tgt_maxlen])
src = src.map(lambda words: self.src_table.lookup(words))
tgt = tgt.map(lambda words: self.tgt_table.lookup(words))
dataset = tf.contrib.data.Dataset.zip((src, tgt))
dataset = dataset.map(
lambda src, tgt: (
src,
tf.concat(([Dataset.SOS], tgt), 0),
tf.concat((tgt, [Dataset.EOS]), 0),
)
)
dataset = dataset.map(
lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in))
)
batched = dataset.padded_batch(
self.config.batch_size,
padded_shapes=(
tf.TensorShape([self.config.src_maxlen]),
tf.TensorShape([self.config.tgt_maxlen+2]),
tf.TensorShape([None]),
tf.TensorShape([]),
tf.TensorShape([]),
)
)
self.batched_iter = batched.make_initializable_iterator()
s = self.batched_iter.get_next()
self.src_ids = s[0]
self.tgt_in_ids = s[1]
self.tgt_out_ids = s[2]
self.src_size = s[3]
self.tgt_size = s[4]
def run_initializers(self, session):
session.run(tf.tables_initializer())
session.run(self.batched_iter.initializer)
class Config(object):
default_fn = os.path.join(
PROJECT_DIR, "config", "seq2seq", "default.yaml"
)
@staticmethod
def load_defaults(fn=default_fn):
with open(fn) as f:
return yaml.load(f)
@classmethod
def from_yaml(cls, fn):
params = yaml.load(fn)
return cls(**params)
def __init__(self, **kwargs):
defaults = Config.load_defaults()
for param, val in defaults.items():
setattr(self, param, val)
for param, val in kwargs.items():
setattr(self, param, val)
config = Config(src_maxlen=30, tgt_maxlen=33)
dataset = Dataset(input_fn, config)
with tf.variable_scope("embedding"):
embedding = tf.get_variable("embedding", [dataset.src_vocab_size, embedding_dim], dtype=tf.float32)
embedding_input = tf.nn.embedding_lookup(embedding, dataset.src_ids)
decoder_emb_inp = tf.nn.embedding_lookup(embedding, dataset.tgt_in_ids)
if is_time_major:
embedding_input = tf.transpose(embedding_input, [1, 0, 2])
decoder_emb_inp = tf.transpose(decoder_emb_inp, [1, 0, 2])
with tf.variable_scope("encoder"):
if use_bidirectional_encoder:
fw_cell = tf.nn.rnn_cell.BasicLSTMCell(cell_size)
fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, input_keep_prob=0.8)
bw_cell = tf.nn.rnn_cell.BasicLSTMCell(cell_size)
bw_cell = tf.contrib.rnn.DropoutWrapper(bw_cell, input_keep_prob=0.8)
o, e = tf.nn.bidirectional_dynamic_rnn(
fw_cell, bw_cell, embedding_input, dtype='float32', sequence_length=dataset.src_size,
time_major=is_time_major)
encoder_outputs = tf.concat(o, -1)
encoder_state = e
else:
fw_cell = tf.nn.rnn_cell.BasicLSTMCell(cell_size)
fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, input_keep_prob=0.8)
o, e = tf.nn.dynamic_rnn(fw_cell, embedding_input, dtype='float32',
sequence_length=dataset.src_size, time_major=is_time_major)
encoder_outputs = o
encoder_state = e
with tf.variable_scope("decoder", dtype="float32") as scope:
if use_bidirectional_encoder:
decoder_cells = []
for i in range(2):
decoder_cell = tf.contrib.rnn.BasicLSTMCell(cell_size)
decoder_cell = tf.contrib.rnn.DropoutWrapper(decoder_cell, input_keep_prob=0.8)
decoder_cells.append(decoder_cell)
decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)
if use_attention:
if is_time_major:
attention_states = tf.transpose(encoder_outputs, [1, 0, 2])
else:
attention_states = encoder_outputs
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
cell_size, attention_states, memory_sequence_length=dataset.src_size,
scale=True
)
decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
decoder_cell, attention_mechanism, attention_layer_size=cell_size,
name="attention"
)
if is_time_major:
decoder_initial_state = decoder_cell.zero_state(
tf.shape(decoder_emb_inp)[1], tf.float32).clone(cell_state=encoder_state)
else:
decoder_initial_state = decoder_cell.zero_state(
tf.shape(decoder_emb_inp)[0], tf.float32).clone(cell_state=encoder_state)
else:
decoder_initial_state = encoder_state
else:
decoder_cell = tf.contrib.rnn.BasicLSTMCell(cell_size)
decoder_initial_state = encoder_state
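    # TrainingHelper implements teacher forcing: the ground-truth previous token is fed at each decoding step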
helper = tf.contrib.seq2seq.TrainingHelper(
decoder_emb_inp, dataset.tgt_size, time_major=is_time_major)
decoder = tf.contrib.seq2seq.BasicDecoder(
decoder_cell, helper, decoder_initial_state)
outputs, final, _ = tf.contrib.seq2seq.dynamic_decode(
decoder, output_time_major=is_time_major, swap_memory=True, scope=scope)
output_proj = layers_core.Dense(dataset.tgt_vocab_size, name="output_proj")
logits = output_proj(outputs.rnn_output)
with tf.variable_scope("train"):
if is_time_major:
logits = tf.transpose(logits, [1, 0, 2])
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=dataset.tgt_out_ids, logits=logits)
target_weights = tf.sequence_mask(dataset.tgt_size, tf.shape(logits)[1], tf.float32)
else:
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=dataset.tgt_out_ids, logits=logits)
target_weights = tf.sequence_mask(dataset.tgt_size, tf.shape(logits)[1], tf.float32)
loss = tf.reduce_sum(crossent * target_weights) / tf.to_float(batch_size)
tf.summary.scalar("loss", loss)
learning_rate = tf.placeholder(dtype=tf.float32, name="learning_rate")
max_global_norm = tf.placeholder(dtype=tf.float32, name="max_global_norm")
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.5)
params = tf.trainable_variables()
gradients = tf.gradients(loss, params)
for grad, var in zip(gradients, params):
tf.summary.histogram(var.op.name+'/gradient', grad)
gradients, _ = tf.clip_by_global_norm(gradients, max_global_norm)
for grad, var in zip(gradients, params):
tf.summary.histogram(var.op.name+'/clipped_gradient', grad)
update = optimizer.apply_gradients(zip(gradients, params))
with tf.variable_scope("greedy_decoder"):
g_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding, tf.fill([dataset.config.batch_size], dataset.SOS), dataset.EOS)
g_decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, g_helper, decoder_initial_state,
output_layer=output_proj)
g_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(g_decoder, maximum_iterations=30)
if use_attention is False:
with tf.variable_scope("beam_search"):
beam_width = 4
start_tokens = tf.fill([config.batch_size], dataset.SOS)
bm_dec_initial_state = tf.contrib.seq2seq.tile_batch(
encoder_state, multiplier=beam_width)
bm_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
cell=decoder_cell,
embedding=embedding,
start_tokens=start_tokens,
initial_state=bm_dec_initial_state,
beam_width=beam_width,
output_layer=output_proj,
end_token=dataset.EOS
)
bm_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
bm_decoder, maximum_iterations=config.tgt_maxlen)
#sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
sess = tf.Session()
dataset.run_initializers(sess)
sess.run(tf.global_variables_initializer())
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 's2s_sandbox', 'tmp'))
writer.add_graph(sess.graph)
%%time
def train(epochs, logstep, lr):
print("Running {} epochs with learning rate {}".format(epochs, lr))
for i in range(epochs):
_, s = sess.run([update, merged_summary], feed_dict={learning_rate: lr, max_global_norm: 5.0})
l = sess.run(loss)
writer.add_summary(s, i)
if i % logstep == logstep - 1:
print("Iter {}, learning rate {}, loss {}".format(i+1, lr, l))
print("Start training...")
if use_toy_data:
train(100, 10, .5)
else:
train(350, 50, 1)
train(1000, 100, 0.1)
train(1000, 100, 0.01)
inv_vocab = {i: v for i, v in enumerate(dataset.tgt_vocab)}
inv_vocab[-1] = 'UNK'
skip_symbols = ('PAD',)
def decode_ids(input_ids, output_ids):
decoded = []
for sample_i in range(output_ids.shape[0]):
input_sample = input_ids[sample_i]
output_sample = output_ids[sample_i]
input_decoded = [inv_vocab[s] for s in input_sample]
input_decoded = ''.join(c for c in input_decoded if c not in skip_symbols)
output_decoded = [inv_vocab[s] for s in output_sample]
try:
eos_idx = output_decoded.index('EOS')
except ValueError: # EOS not in list
eos_idx = len(output_decoded)
output_decoded = output_decoded[:eos_idx]
output_decoded = ''.join(c for c in output_decoded if c not in skip_symbols)
decoded.append((input_decoded, output_decoded))
return decoded
if use_attention is True:
input_ids, output_ids = sess.run([dataset.src_ids, g_outputs.sample_id])
else:
input_ids, output_ids, bm_output_ids = sess.run([dataset.src_ids, g_outputs.sample_id,
bm_outputs.predicted_ids])
decoded = decode_ids(input_ids, output_ids)
print('\n'.join(
'{} ---> {}'.format(dec[0], dec[1]) for dec in decoded
))
if use_attention is False:
all_decoded = []
for beam_i in range(beam_width):
inputs = []
all_decoded.append([])
decoded = decode_ids(input_ids, bm_output_ids[:,:,beam_i])
for dec in decoded:
all_decoded[-1].append(dec[1])
inputs.append(dec[0])
print('\n'.join(
'{} ---> {}'.format(inputs[i], ' / '.join(d[i] for d in all_decoded))
for i in range(len(inputs))
))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download data if necessary
Step2: Load and preprocess data
Step3: Create model
Step4: Encoder
Step5: Decoder
Step6: Loss and training operations
Step7: Greedy decoder for inference
Step8: Beam search decoder
Step9: Starting session
Step10: Training
Step11: Inference
Step12: Beam search decoding
|
4,783
|
<ASSISTANT_TASK:>
Python Code:
x = 10 # x is an integer
print type(x)
x = 1.3 # x is a floating point number
print type(x)
x = "Ola" # x is a string
print type(x)
x = [1, 5, 10] # x is a list
print type(x)
x = 10
for i in range(20):
    # Start of the For loop block
    x = x + 1
    if x%2 == 0:
        # Statement if the condition is true
        x = x + 1
    else:
        # Statement if the condition is false
        x = x + 2
    # End of the For loop block
print x # This is outside the for loop! Good idea!
def Soma(x,y):
return x+y
def Mult(x,y):
return x*y
def SomaMult(x,y):
    return x+y, x*y # multiple outputs separated by commas
print Soma(10,2), Mult(100,2), SomaMult(10,2)
# Multiple return values can be assigned directly to multiple variables
w,z = SomaMult(100,2)
print w, z
lista = [1, 2, True, "palavra"]
tupla = (1, 2, True, "palavra")
lista[1] = 3.0
print lista
tupla[1] = 3.0 # This will raise an error!
# range(n) generates a list of values from 0 to n-1
# len(lista) returns the size of a list
def DobraValores(lista):
for i in range(len(lista)):
lista[i] = lista[i]*2
return lista
lista = [1,2,3,4]
lista2 = DobraValores(lista)
print lista, lista2 # Lists are passed to functions by reference
dicionario = { "Ana":12, "Joao":13, "Jose":17 } # initial dictionary declaration; it can be {} for an empty dict
print dicionario["Ana"] # access an element by its key between brackets
dicionario["Maria"] = 11 # we can modify or insert a new element
print dicionario
print "The dictionary keys are: ", dicionario.keys()
print "The dictionary values are: ", dicionario.values()
lista = range(10) # generates the list [0,..,9]
print 8 in lista, 12 in lista
for x in lista:
print x
# Traditional approach, but not optimized
listaOriginal = [1,2,3,4,5,6,7,8,9]
listaQuadrada = []
for x in listaOriginal:
listaQuadrada.append(x*x)
print listaQuadrada
# Using a list comprehension
listaQuadrada = [ x*x for x in listaOriginal ]
print listaQuadrada
listaQuadrada = ( x*x for x in listaOriginal )
print listaQuadrada # the elements have not been computed yet
for x in listaQuadrada:
    print x # at each iteration only the next element is computed; the full list never exists in memory
import os.path
caminho = os.path.join('Data','Aula01') # ensures the correct use of / or \\ for directories
arquivo = os.path.join(caminho,'exemplo.txt')
f = open(arquivo)
for linha in f:
print linha
f.close()
# By convention we import the numpy library as np
import numpy as np
# EXERCISE
# Create a numpy array with the values 1, 2, 3
arraySimples = np.array([1, 2, 3])
# Compute the scalar product, multiplying the array by 5
vezesCinco = np.multiply(arraySimples, 5)
print arraySimples
print vezesCinco
# TEST for exercise (2a)
assert np.all(vezesCinco == [5, 10, 15]), 'incorrect value for vezesCinco'
print "Correct!"
# EXERCISE
# The function np.arange(start, stop, step) creates a list beginning at start and ending before stop, following step
u = np.arange(0, 5, .5) # np.array([0,0.5,1.0,...,4.5])
v = np.arange(5, 10, .5)
elementoAelemento = u * v
prodInterno = np.dot(u, v)
print 'u: {0}'.format(u)
print 'v: {0}'.format(v)
print '\nelementoAelemento\n{0}'.format(elementoAelemento)
print '\nprodInterno\n{0}'.format(prodInterno)
# TEST for exercise (2b)
assert np.all(elementoAelemento == [ 0., 2.75, 6., 9.75, 14., 18.75, 24., 29.75, 36., 42.75]), "Incorrect values for elementoAelemento"
print "First test OK"
assert prodInterno==183.75, "Incorrect value for prodInterno"
print "Second test OK"
# EXERCISE
from numpy.linalg import pinv # now we can use the pinv() command without prefixing it with np.linalg
# Create a matrix from lists of lists
A = np.matrix([[1,2,3,4],[5,6,7,8]])
print 'A:\n{0}'.format(A)
# Print the transposed matrix
print '\nA transposed:\n{0}'.format(A.T)
# Multiply A by its transpose
AAt = np.dot(A, A.T)
print '\nAAt:\n{0}'.format(AAt)
# Invert AAt with the pinv() command
AAtInv = pinv(AAt)
print '\nAAtInv:\n{0}'.format(AAtInv)
# Show that the matrix times its inverse is the identity
# .round(n) rounds the values to n decimal places
print '\nAAtInv * AAt:\n{0}'.format(np.dot(AAtInv, AAt).round(4))
# TEST for exercise (2c)
assert np.all(AAt == np.matrix([[30, 70], [70, 174]])), "Incorrect values for AAt"
print "First test OK"
assert np.allclose(AAtInv, np.matrix([[0.54375, -0.21875], [-0.21875, 0.09375]])), "Incorrect value for AAtInv"
print "Second test OK"
# EXERCISE
atributos = np.array([1, 2, 3, 4])
print 'atributos:\n{0}'.format(atributos)
# Create an array with the last 3 elements of atributos
ultTres = atributos[-3:]
print '\nLast three:\n{0}'.format(ultTres)
# TEST for exercise (2d)
assert np.all(ultTres == [2, 3, 4]), "Incorrect values for ultTres"
print "Test OK"
# EXERCISE
# Remember that: "lambda x, y: x + y" creates a function that adds two values
mult10 = lambda x : x*10
print mult10(5)
# Note that the function still shows its name as <lambda>
print '\n', mult10
assert mult10(10)==100, "Incorrect function"
print "Test OK"
import numpy as np
p1 = (1,3)
p2 = (3,7)
euclidiana2D = lambda (x0,y0), (x1,y1): np.sqrt(((x0-x1)**2) + ((y0-y1)**2)) # sqrt is the square root
print euclidiana2D(p1,p2)
# Non-functional logic
a = 0
def inc():
global a
a = a + 1
# Functional logic
def incFn(a):
return a+1
# Function that adds 3 values
def Soma3(a,b,c):
return a+b+c
# Function that adds only two values
def Soma2(a,b):
return a+b
# Soma3 could be built from Soma2:
Soma3Fn = lambda a,b,c: Soma2(Soma2(a,b),c)
# Creates a function that evaluates the quadratic ax^2 + bx + c
def Eq2grau(a,b,c):
def f(x):
return a*x**2 + b*x + c
return f
f = Eq2grau(10,2,1)
print f(10)
# EXERCISE
# Write a function Soma(x) that returns a function which takes a value y and adds it to x.
def Soma(x):
    def f(y):
        return x+y
    return f
Soma2 = lambda a,b: Soma(a)(b)
Soma3 = lambda a,b,c: Soma(Soma(a)(b))(c)
print Soma2(1,3), Soma3(1,2,3)
assert Soma3(1,2,3)==6, "Error in the function"
print "Ok"
class FuncionalW(object):
def __init__(self, data):
self.data = data
def map(self, function):
Call `map` on the items in `data` using the provided `function`
return FuncionalW(map(function, self.data))
def reduce(self, function):
Call `reduce` on the items in `data` using the provided `function`
return reduce(function, self.data)
def filter(self, function):
Call `filter` on the items in `data` using the provided `function`
return FuncionalW(filter(function, self.data))
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __getattr__(self, name): return getattr(self.data, name)
def __getitem__(self, k): return self.data.__getitem__(k)
def __repr__(self): return 'FuncionalW({0})'.format(repr(self.data))
def __str__(self): return 'FuncionalW({0})'.format(str(self.data))
# Map example
# We will create a list
lista = FuncionalW(range(10))
# Create a function to be applied to this list
f = lambda x: x*x
# Imperative programming
resultado1 = FuncionalW([])
for x in lista:
    resultado1.append(f(x))
print "Result: {}".format(resultado1)
# Functional
print "Result using Map: {}".format(lista.map(f))
# Reduce example
# We will create a list
lista = FuncionalW(range(1,10))
# Create a function to be applied to this list
f = lambda x,y: x*y
# Imperative programming
produtoria = 1
for x in lista:
    produtoria = f(produtoria,x)
print "Result: {}".format(produtoria)
# Functional
print "Result using Reduce: {}".format(lista.reduce(f))
# EXERCISE
dataset = FuncionalW(range(10))
# Multiply each element by 5
mapResult = dataset.map(lambda x : x*5)
# Filter out the odd elements
# In Python "x % 2" is the remainder of the division of x by 2
filterResult = dataset.filter(lambda x: x % 2 == 0)
# Sum the elements
reduceResult = dataset.reduce(lambda x, y : x+y)
print 'mapResult: {0}'.format(mapResult)
print '\nfilterResult: {0}'.format(filterResult)
print '\nreduceResult: {0}'.format(reduceResult)
assert mapResult == FuncionalW([0, 5, 10, 15, 20, 25, 30, 35, 40, 45]),"Incorrect value for mapResult"
print "Test 1 OK"
assert filterResult == FuncionalW([0, 2, 4, 6, 8]), "Incorrect value for filterResult"
print "Test 2 OK"
assert reduceResult == 45, "Incorrect value for reduceResult"
print "Test 3 OK"
dataset = FuncionalW(range(10))
Soma = (dataset
.map(lambda x: x*5)
.filter(lambda x: x%2==0)
.reduce(lambda x,y: x+y)
)
print Soma
# EXERCISE
# split() breaks the string into words
Texto = FuncionalW("Esse texto tem varias palavras cada linha tem palavras escritas Esse texto esta escrito".split())
# Let's count the occurrences of the word 'palavras' in the text
# Create a lambda function that takes two inputs and returns whether or not they are equal
Igual = lambda x,y: x == y
# Create a lambda function that uses Igual to detect whether the input equals the word 'palavras'
DetectaPalavra = lambda x: Igual(x,"palavras")
# 1) Filter the words equal to 'palavras'
# 2) Map all elements to the value 1
# 3) Reduce to the sum
contagem = (Texto
            .filter(DetectaPalavra)
            .map(lambda x: 1)
            .reduce(lambda x,y : x + y)
           )
print "There are {} occurrences of 'palavras'".format(contagem)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (1b) Indentation
Step2: (1c) Functions
Step3: (1d) Special Types
Step4: (1e) Iterators
Step5: (1f) Generators and List Comprehension
Step6: When we need to work with very large lists, but without the need to access their elements randomly, we can use generators.
Step7: (1g) Files
Step8: Part 2
Step9: (2b) Element-wise multiplication and inner product
Step10: (2c) Matrix Multiplication
Step11: (2d) Slices
Step12: Part 3
Step13: Lambda functions have restrictions on the expression they compute. That expression cannot contain a print statement or an increment such as +=, for example.
Step14: (3b) Functional Logic
Step15: (3c) Higher-Order Functions
Step16: This kind of function helps create declarative code, in which the code itself explains what is being done
Step20: (3d) Map, Reduce, Filter
Step21: To reduce the code size and improve readability, we can compose the functions in sequence
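A quick way to see the memory difference (an illustrative sketch; sys.getsizeof reports only the container object's own size):
import sys
lista = [x*x for x in range(10**6)] # list: every element is materialized at once
gerador = (x*x for x in range(10**6)) # generator: elements are produced one at a time
print sys.getsizeof(lista), sys.getsizeof(gerador) # the generator object stays tiny no matter the length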
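A minimal illustration of that restriction (sketch): the body of a lambda must be a single expression, so anything that needs statements goes into a regular def:
double_fn = lambda x: 2*x # fine: a single expression
def print_double(x): # print is a statement in Python 2, so it cannot appear inside a lambda
    print 2*x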
|
4,784
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
size = 18
params = {'legend.fontsize': 'Large',
'axes.labelsize': size,
'axes.titlesize': size,
'xtick.labelsize': size*0.75,
'ytick.labelsize': size*0.75}
plt.rcParams.update(params)
digitsX = np.load('./data/Sign-language-digits-dataset/X.npy')
digitsY = np.load('./data/Sign-language-digits-dataset/Y.npy')
K = digitsX.shape[0]
img_size = digitsX.shape[1]
digitsX_flatten = digitsX.reshape(K,img_size*img_size)
print('Size of Input Data Matrix:', digitsX.shape)
print('Size of Flattned Input Data Matrix:', digitsX_flatten.shape)
print('Size of label Data Matrix:', digitsY.shape)
selected = [260, 1400]
plt.subplot(1, 2, 1), plt.imshow(digitsX[selected[0]].reshape(img_size, img_size)), plt.axis('off')
plt.subplot(1, 2, 2), plt.imshow(digitsX[selected[1]].reshape(img_size, img_size)), plt.axis('off')
plt.show()
print('Labels corresponding to figures:', digitsY[selected,])
# Preprocessing of original Dogs and Cats Pictures
# Adapted from https://medium.com/@mrgarg.rajat/kaggle-dogs-vs-cats-challenge-complete-step-by-step-guide-part-1-a347194e55b1
# RGB channels are collapsed in GRAYSCALE
# Images are resampled to 64x64
import os, cv2 # cv2 -- OpenCV
train_dir = './data/DogsCats/train/'
rows = 64
cols = 64
train_images = sorted([train_dir+i for i in os.listdir(train_dir)])
def read_image(file_path):
image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
return cv2.resize(image, (rows, cols),interpolation=cv2.INTER_CUBIC)
def prep_data(images):
m = len(images)
X = np.ndarray((m, rows, cols), dtype=np.uint8)
y = np.zeros((m,))
print("X.shape is {}".format(X.shape))
for i,image_file in enumerate(images) :
image = read_image(image_file)
X[i,] = np.squeeze(image.reshape((rows, cols)))
if 'dog' in image_file.split('/')[-1].lower():
y[i] = 1
elif 'cat' in image_file.split('/')[-1].lower():
y[i] = 0
if i%5000 == 0 :
print("Proceed {} of {}".format(i, m))
return X,y
X_train, y_train = prep_data(train_images)
np.save('./data/DogsCats/X.npy', X_train)
np.save('./data/DogsCats/Y.npy', y_train)
DogsCatsX = np.load('./data/DogsCats/X.npy')
DogsCatsY = np.load('./data/DogsCats/Y.npy')
K = DogsCatsX.shape[0]
img_size = DogsCatsX.shape[1]
DogsCatsX_flatten = DogsCatsX.reshape(K,img_size*img_size)
print('Size of Input Data Matrix:', DogsCatsX.shape)
print('Size of Flattned Input Data Matrix:', DogsCatsX_flatten.shape)
print('Size of label Data Matrix:', DogsCatsY.shape)
selected = [260, 16000]
plt.subplot(1, 2, 1), plt.imshow(DogsCatsX[selected[0]].reshape(img_size, img_size)), plt.axis('off')
plt.subplot(1, 2, 2), plt.imshow(DogsCatsX[selected[1]].reshape(img_size, img_size)), plt.axis('off')
plt.show()
print('Labels corresponding to figures:', DogsCatsY[selected,])
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
def get_dataset(dataset_name, forze_binary=False):
Loads the selected dataset, among two options: DogsCats or digits.
If dataset_name == 'digits', you can take a dataset with two classes only,
using forze_binary == True
if dataset_name == 'DogsCats':
X = DogsCatsX_flatten
y = DogsCatsY
elif dataset_name == 'digits':
if forze_binary:
#Zero and Ones are one hot encoded in columns 1 and 4
X0 = digitsX_flatten[np.argmax(digitsY, axis=1)==1,]
X1 = digitsX_flatten[np.argmax(digitsY, axis=1)==4,]
X = np.vstack((X0, X1))
y = np.zeros(X.shape[0])
y[X0.shape[0]:] = 1
else:
X = digitsX_flatten
y = digitsY
else:
print("-- ERROR: Unknown dataset")
return
# Joint normalization of all data. For images [-.5, .5] scaling is frequent
min_max_scaler = MinMaxScaler(feature_range=(-.5, .5))
X = min_max_scaler.fit_transform(X)
# Generate train and validation data, shuffle
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42, shuffle=True)
return X_train, X_val, y_train, y_val
# Define some useful functions
def logistic(t):
Computes the logistic function
return 1.0 / (1 + np.exp(-t))
def forward(w,b,x):
Computes the network output
# return logistic(x.dot(w) + b)
return logistic(x @ w + b)
def backward(y, y_hat, x):
Computes the gradient of the loss function for a single sample x with
ouput y_hat, given label y.
# w_grad = x.T.dot((1-y)*y_hat - y*(1-y_hat))/len(y)
# b_grad = np.sum((1-y)*y_hat - y*(1-y_hat))/len(y)
w_grad = x.T @ (y_hat - y) / len(y)
b_grad = np.mean(y_hat - y)
return w_grad, b_grad
def accuracy(y, y_hat):
return np.mean(y == (y_hat >= 0.5))
def loss(y, y_hat):
return - (y @ np.log(y_hat) + (1 - y) @ np.log(1 - y_hat)) / len(y)
X_train, X_val, y_train, y_val = get_dataset('digits', forze_binary=True)
#Neural Network Training
epochs = 50
rho = .05 # Use this setting for Sign Digits Dataset
#Parameter initialization
w = .1 * np.random.randn(X_train.shape[1])
b = .1 * np.random.randn(1)
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in np.arange(epochs):
y_hat_train = forward(w, b, X_train)
y_hat_val = forward(w, b, X_val)
w_grad, b_grad = backward(y_train, y_hat_train, X_train)
w = w - rho * w_grad
b = b - rho * b_grad
loss_train[epoch] = loss(y_train, y_hat_train)
loss_val[epoch] = loss(y_val, y_hat_val)
acc_train[epoch] = accuracy(y_train, y_hat_train)
acc_val[epoch] = accuracy(y_val, y_hat_val)
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
dataset = 'digits'
X_train, X_val, y_train, y_val = get_dataset('digits')
# Define some useful functions
def softmax(t):
    Compute softmax values for each set of scores in t.
    e_t = np.exp(t - t.max(axis=1, keepdims=True)) # shift by the row max for numerical stability
    return e_t / e_t.sum(axis=1, keepdims=True)
def forward(w, b, x):
    # Compute the network output
return softmax(x @ w.T + b.T)
def backward(y, y_hat, x):
    # Compute the gradients
W_grad = (y_hat - y).T @ x / len(y)
b_grad = (y_hat - y).T.mean(axis=1, keepdims=True)
return W_grad, b_grad
def accuracy(y, y_hat):
return np.mean(np.argmax(y, axis=1) == np.argmax(y_hat, axis=1))
def loss(y, y_hat):
return - np.sum(y * np.log(y_hat)) / len(y)
# Neural Network Training
epochs = 300
rho = .1
#Parameter initialization
W = .1 * np.random.randn(y_train.shape[1], X_train.shape[1])
b = .1 * np.random.randn(y_train.shape[1], 1)
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in np.arange(epochs):
print(f"Epoch {epoch} out of {epochs} \r", end="")
y_hat_train = forward(W, b, X_train)
y_hat_val = forward(W, b, X_val)
W_grad, b_grad = backward(y_train, y_hat_train, X_train)
W = W - rho * W_grad
b = b - rho * b_grad
loss_train[epoch] = loss(y_train, y_hat_train)
loss_val[epoch] = loss(y_val, y_hat_val)
acc_train[epoch] = accuracy(y_train, y_hat_train)
acc_val[epoch] = accuracy(y_val, y_hat_val)
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
# Define some useful functions
def logistic(t):
return 1.0 / (1 + np.exp(-t))
def forward(W1, b1, w2, b2, x):
    # Compute the network output (tanh activation in the hidden layer)
    h = np.tanh(x.dot(W1.T) + b1)
    y_hat = logistic(h.dot(w2) + b2)
    #Provide also hidden units value for backward gradient step
    return h, y_hat
def backward(y, y_hat, h, x, w2):
    # Compute the gradients; (1 - h**2) is the derivative of tanh
    w2_grad = h.T.dot(y_hat - y) / len(y)
    b2_grad = np.sum(y_hat - y) / len(y)
    W1_grad = ((w2[np.newaxis,] * (1 - h**2) * (y_hat - y)[:,np.newaxis]).T.dot(x)) / len(y)
    b1_grad = ((w2[np.newaxis,] * (1 - h**2) * (y_hat - y)[:,np.newaxis]).sum(axis=0)) / len(y)
    return w2_grad, b2_grad, W1_grad, b1_grad
def accuracy(y, y_hat):
return np.mean(y == (y_hat >= 0.5))
def loss(y, y_hat):
return - np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) / len(y)
def evaluate_model(
X_train, X_val, y_train, y_val, n_h=5, epochs=1000, rho=.005):
W1 = .01 * np.random.randn(n_h, X_train.shape[1])
b1 = .01 * np.random.randn(n_h)
w2 = .01 * np.random.randn(n_h)
b2 = .01 * np.random.randn(1)
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in np.arange(epochs):
print(f'Current epoch: {epoch + 1} \r', end="")
h, y_hat_train = forward(W1, b1, w2, b2, X_train)
dum, y_hat_val = forward(W1, b1, w2, b2, X_val)
w2_grad, b2_grad, W1_grad, b1_grad = backward(y_train, y_hat_train, h, X_train, w2)
W1 = W1 - rho/10 * W1_grad
b1 = b1 - rho/10 * b1_grad
w2 = w2 - rho * w2_grad
b2 = b2 - rho * b2_grad
loss_train[epoch] = loss(y_train, y_hat_train)
loss_val[epoch] = loss(y_val, y_hat_val)
acc_train[epoch] = accuracy(y_train, y_hat_train)
acc_val[epoch] = accuracy(y_val, y_hat_val)
return loss_train, loss_val, acc_train, acc_val
dataset = 'DogsCats'
X_train, X_val, y_train, y_val = get_dataset(dataset)
loss_train, loss_val, acc_train, acc_val = evaluate_model(
X_train, X_val, y_train, y_val, n_h=5, epochs=1000, rho=0.05)
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
dataset = 'digits'
X_train, X_val, y_train, y_val = get_dataset(dataset, forze_binary=True)
loss_train, loss_val, acc_train, acc_val = evaluate_model(
X_train, X_val, y_train, y_val, n_h=5, epochs=10000, rho=0.001)
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
x_array = np.linspace(-6,6,100)
y_array = np.clip(x_array, 0, a_max=None)
plt.plot(x_array, y_array)
plt.title('ReLU activation function')
plt.show()
import torch
x = torch.rand((100,200))
digitsX_flatten_tensor = torch.from_numpy(digitsX_flatten)
print(x.type())
print(digitsX_flatten_tensor.size())
print('Size of tensor x:', x.size())
print('Tranpose of vector has size', x.t().size()) #Transpose and compute size
print('Extracting upper left matrix of size 3 x 3:', x[:3,:3])
print(x.mm(x.t()).size()) #mm for matrix multiplications
xpx = x.add(x)
xpx2 = torch.add(x,x)
print((xpx!=xpx2).sum()) #Since all are equal, count of different terms is zero
if torch.cuda.is_available():
device = torch.device('cuda')
x = x.to(device)
y = x.add(x)
y = y.to('cpu')
else:
print('No GPU card is available')
x.requires_grad = True
y = (3 * torch.log(x)).sum()
y.backward()
print(x.grad[:2,:2])
print(3/x[:2,:2])
x.requires_grad = False
x.grad.zero_()
print('Automatic gradient calculation is deactivated, and gradients set to zero')
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
dataset = 'digits'
#Joint normalization of all data. For images [-.5, .5] scaling is frequent
min_max_scaler = MinMaxScaler(feature_range=(-.5, .5))
X = min_max_scaler.fit_transform(digitsX_flatten)
#Generate train and validation data, shuffle
X_train, X_val, y_train, y_val = train_test_split(X, digitsY, test_size=0.2, random_state=42, shuffle=True)
#Convert to Torch tensors
X_train_torch = torch.from_numpy(X_train)
X_val_torch = torch.from_numpy(X_val)
y_train_torch = torch.from_numpy(y_train)
y_val_torch = torch.from_numpy(y_val)
# Define some useful functions
def softmax(t):
    Compute softmax values for each set of scores in t
return t.exp() / t.exp().sum(-1).unsqueeze(-1)
def model(w,b,x):
    # Compute the network output
return softmax(x.mm(w) + b)
def accuracy(y, y_hat):
return (y.argmax(axis=-1) == y_hat.argmax(axis=-1)).float().mean()
def nll(y, y_hat):
return -(y * y_hat.log()).mean()
# Parameter initialization
W = .1 * torch.randn(X_train_torch.size()[1], y_train_torch.size()[1])
W.requires_grad_()
b = torch.zeros(y_train_torch.size()[1], requires_grad=True)
epochs = 500
rho = .5
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
# Network training
for epoch in range(epochs):
print(f'Current epoch: {epoch + 1} \r', end="")
#Compute network output and cross-entropy loss
pred = model(W,b,X_train_torch)
loss = nll(y_train_torch, pred)
#Compute gradients
loss.backward()
#Deactivate gradient automatic updates
with torch.no_grad():
#Computing network performance after iteration
loss_train[epoch] = loss.item()
acc_train[epoch] = accuracy(y_train_torch, pred).item()
pred_val = model(W, b, X_val_torch)
loss_val[epoch] = nll(y_val_torch, pred_val).item()
acc_val[epoch] = accuracy(y_val_torch, pred_val).item()
#Weight update
W -= rho * W.grad
b -= rho * b.grad
#Reset gradients
W.grad.zero_()
b.grad.zero_()
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
from torch import nn
class my_multiclass_net(nn.Module):
def __init__(self, nin, nout):
This method initializes the network parameters
Parameters nin and nout stand for the number of input parameters (features in X)
and output parameters (number of classes)
super().__init__()
self.W = nn.Parameter(.1 * torch.randn(nin, nout))
self.b = nn.Parameter(torch.zeros(nout))
def forward(self, x):
return softmax(x.mm(self.W) + self.b)
def softmax(t):
Compute softmax values for each sets of scores in t
return t.exp() / t.exp().sum(-1).unsqueeze(-1)
my_net = my_multiclass_net(X_train_torch.size()[1], y_train_torch.size()[1])
epochs = 500
rho = .5
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in range(epochs):
print(f'Current epoch: {epoch + 1} \r', end="")
#Compute network output and cross-entropy loss
pred = my_net(X_train_torch)
loss = nll(y_train_torch, pred)
#Compute gradients
loss.backward()
#Deactivate gradient automatic updates
with torch.no_grad():
#Computing network performance after iteration
loss_train[epoch] = loss.item()
acc_train[epoch] = accuracy(y_train_torch, pred).item()
pred_val = my_net(X_val_torch)
loss_val[epoch] = nll(y_val_torch, pred_val).item()
acc_val[epoch] = accuracy(y_val_torch, pred_val).item()
#Weight update
for p in my_net.parameters():
p -= p.grad * rho
#Reset gradients
my_net.zero_grad()
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
from torch import nn
class my_multiclass_net(nn.Module):
def __init__(self, nin, nout):
Note that now, we do not even need to initialize network parameters ourselves
super().__init__()
self.lin = nn.Linear(nin, nout)
def forward(self, x):
return self.lin(x)
loss_func = nn.CrossEntropyLoss()
my_net = my_multiclass_net(X_train_torch.size()[1], y_train_torch.size()[1])
epochs = 500
rho = .1
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in range(epochs):
print(f'Current epoch: {epoch + 1} \r', end="")
#Compute network output and cross-entropy loss
pred = my_net(X_train_torch)
loss = loss_func(pred, y_train_torch.argmax(axis=-1))
#Compute gradients
loss.backward()
#Deactivate gradient automatic updates
with torch.no_grad():
#Computing network performance after iteration
loss_train[epoch] = loss.item()
acc_train[epoch] = accuracy(y_train_torch, pred).item()
pred_val = my_net(X_val_torch)
loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()
acc_val[epoch] = accuracy(y_val_torch, pred_val).item()
#Weight update
for p in my_net.parameters():
p -= p.grad * rho
#Reset gradients
my_net.zero_grad()
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
from torch import optim
my_net = my_multiclass_net(X_train_torch.size()[1], y_train_torch.size()[1])
opt = optim.SGD(my_net.parameters(), lr=0.1)
epochs = 500
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in range(epochs):
print(f'Current epoch: {epoch + 1} \r', end="")
#Compute network output and cross-entropy loss
pred = my_net(X_train_torch)
loss = loss_func(pred, y_train_torch.argmax(axis=-1))
#Compute gradients
loss.backward()
#Deactivate gradient automatic updates
with torch.no_grad():
#Computing network performance after iteration
loss_train[epoch] = loss.item()
acc_train[epoch] = accuracy(y_train_torch, pred).item()
pred_val = my_net(X_val_torch)
loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()
acc_val[epoch] = accuracy(y_val_torch, pred_val).item()
opt.step()
opt.zero_grad()
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
from torch.utils.data import TensorDataset, DataLoader
train_ds = TensorDataset(X_train_torch, y_train_torch)
train_dl = DataLoader(train_ds, batch_size=64)
from torch import optim
my_net = my_multiclass_net(X_train_torch.size()[1], y_train_torch.size()[1])
opt = optim.SGD(my_net.parameters(), lr=0.1)
epochs = 200
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in range(epochs):
print(f'Current epoch: {epoch + 1} \r', end="")
for xb, yb in train_dl:
#Compute network output and cross-entropy loss for current minibatch
pred = my_net(xb)
loss = loss_func(pred, yb.argmax(axis=-1))
#Compute gradients and optimize parameters
loss.backward()
opt.step()
opt.zero_grad()
#At the end of each epoch, evaluate overall network performance
with torch.no_grad():
#Computing network performance after iteration
pred = my_net(X_train_torch)
loss_train[epoch] = loss_func(pred, y_train_torch.argmax(axis=-1)).item()
acc_train[epoch] = accuracy(y_train_torch, pred).item()
pred_val = my_net(X_val_torch)
loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()
acc_val[epoch] = accuracy(y_val_torch, pred_val).item()
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
my_net = nn.Sequential(
nn.Linear(X_train_torch.size()[1], 200),
nn.ReLU(),
nn.Linear(200,50),
nn.ReLU(),
nn.Linear(50,20),
nn.ReLU(),
nn.Linear(20,y_train_torch.size()[1])
)
opt = optim.SGD(my_net.parameters(), lr=0.1)
epochs = 200
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in range(epochs):
print(f'Current epoch: {epoch + 1} \r', end="")
for xb, yb in train_dl:
#Compute network output and cross-entropy loss for current minibatch
pred = my_net(xb)
loss = loss_func(pred, yb.argmax(axis=-1))
#Compute gradients and optimize parameters
loss.backward()
opt.step()
opt.zero_grad()
#At the end of each epoch, evaluate overall network performance
with torch.no_grad():
#Computing network performance after iteration
pred = my_net(X_train_torch)
loss_train[epoch] = loss_func(pred, y_train_torch.argmax(axis=-1)).item()
acc_train[epoch] = accuracy(y_train_torch, pred).item()
pred_val = my_net(X_val_torch)
loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()
acc_val[epoch] = accuracy(y_val_torch, pred_val).item()
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
print('Validation accuracy with this net:', acc_val[-1])
dataset = 'digits'
#Generate train and validation data, shuffle
X_train, X_val, y_train, y_val = train_test_split(digitsX[:,np.newaxis,:,:], digitsY, test_size=0.2, random_state=42, shuffle=True)
#Convert to Torch tensors
X_train_torch = torch.from_numpy(X_train)
X_val_torch = torch.from_numpy(X_val)
y_train_torch = torch.from_numpy(y_train)
y_val_torch = torch.from_numpy(y_val)
train_ds = TensorDataset(X_train_torch, y_train_torch)
train_dl = DataLoader(train_ds, batch_size=64)
class Lambda(nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
my_net = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.AvgPool2d(4),
Lambda(lambda x: x.view(x.size(0), -1)),
)
opt = optim.SGD(my_net.parameters(), lr=0.1)
epochs = 2500
loss_train = np.zeros(epochs)
loss_val = np.zeros(epochs)
acc_train = np.zeros(epochs)
acc_val = np.zeros(epochs)
for epoch in range(epochs):
    print(f'Current epoch: {epoch + 1} \r', end="")
for xb, yb in train_dl:
#Compute network output and cross-entropy loss for current minibatch
pred = my_net(xb)
loss = loss_func(pred, yb.argmax(axis=-1))
#Compute gradients and optimize parameters
loss.backward()
opt.step()
opt.zero_grad()
#At the end of each epoch, evaluate overall network performance
with torch.no_grad():
# Computing network performance after iteration
pred = my_net(X_train_torch)
loss_train[epoch] = loss_func(pred, y_train_torch.argmax(axis=-1)).item()
acc_train[epoch] = accuracy(y_train_torch, pred).item()
pred_val = my_net(X_val_torch)
loss_val[epoch] = loss_func(pred_val, y_val_torch.argmax(axis=-1)).item()
acc_val[epoch] = accuracy(y_val_torch, pred_val).item()
plt.figure(figsize=(14,5))
plt.subplot(1, 2, 1), plt.plot(loss_train, 'b'), plt.plot(loss_val, 'r'), plt.legend(['train', 'val']), plt.title('Cross-entropy loss')
plt.subplot(1, 2, 2), plt.plot(acc_train, 'b'), plt.plot(acc_val, 'r'), plt.legend(['train', 'val']), plt.title('Accuracy')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color='teal'> 1. Introduction and purpose of this Notebook </font>
Step3: <font color='olive'>Dogs vs Cats data set</font>
Step8: <font color='teal'> 2.2. Logistic Regression as a Simple Neural Network </font>
Step10: <font color='olive'>Exercise</font>
Step11: <font color='olive'>Exercise</font>
Step12: <font color='olive'>Results in Dogs vs Cats dataset ($epochs = 1000$ and $\rho = 0.05$)</font>
Step13: <font color='olive'>Results in Binary Sign Digits Dataset ($epochs = 10000$ and $\rho = 0.001$)</font>
Step14: <font color='olive'>Exercises</font>
Step15: <font color='teal'> 3. Implementing Deep Networks with PyTorch </font>
Step16: Tensors can be converted back to numpy arrays
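For instance (a small sketch; torch is assumed imported):
t = torch.rand(2, 3)
a = t.numpy() # CPU tensor -> numpy array, sharing the same memory
t2 = torch.from_numpy(a) # and back again, still sharing storage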
Step17: Adding underscore performs operations "in place", e.g., x.add_(y)
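For example (sketch):
x = torch.ones(3)
x.add_(1) # in-place: modifies x directly, no new tensor is allocated
y = x.add(1) # out-of-place: x is unchanged and y is a new tensor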
Step18: <font color='teal'> 3.3. Automatic gradient calculation </font>
Step20: <font color='olive'>Exercise</font>
Step21: Syntax is a bit different because the input variables are tensors, not arrays
Step22: It is important to deactivate gradient updates after the network has been evaluated on training data, and gradients of the loss function have been computed
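The usual pattern is a context manager (sketch; model and X_val stand for any trained network and held-out tensor):
with torch.no_grad():
    pred_val = model(X_val) # no computation graph is recorded: faster, and no stale gradients accumulate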
Step25: <font color='olive'> 3.4.2. Using torch nn module </font>
Step27: nn.Module comes with several kinds of pre-defined layers, thus making it even simpler to implement neural networks
Step28: Note faster convergence is observed in this case. It is actually due to a more convenient initialization of the hidden layer
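To experiment with that initialization explicitly (a sketch; by default nn.Linear uses a Kaiming-style uniform scheme):
lin = nn.Linear(200, 50)
nn.init.xavier_uniform_(lin.weight) # swap in Xavier initialization
nn.init.zeros_(lin.bias)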
Step29: Note network optimization is carried out outside torch.no_grad(), but network evaluation (other than the forward output calculation for the training patterns) still needs to deactivate gradient updates
Step30: <font color='olive'> Exercise </font>
Step31: <font color='olive'> 3.4.4. Multi Layer networks using nn.Sequential </font>
Step32: <font color='teal'> 3.5. Generalization</font>
|
4,785
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import chap01soln
resp = chap01soln.ReadFemResp()
resp_numkdhh = resp.numkdhh
resp_numkdhh_vc = resp_numkdhh.value_counts().sort_index()
print resp_numkdhh_vc
resp_numkdhh_pmf_original = resp_numkdhh_vc / len(resp_numkdhh)
resp_numkdhh_pmf_original.plot(kind = 'bar', legend = True, label= 'original')
def BiasPmf(pmf, label=''):
Returns the Pmf with oversampling proportional to value.
If pmf is the distribution of true values, the result is the
distribution that would be seen if values are oversampled in
proportion to their values; for example, if you ask students
how big their classes are, large classes are oversampled in
proportion to their size.
Args:
pmf: Pmf object.
label: string label for the new Pmf.
Returns:
Pmf object
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
import pandas as pd
resp_numkdhh_pmf_biased = pd.DataFrame()
biased_list = [index * row for index, row in resp_numkdhh_pmf_original.iteritems()]
total = sum(biased_list)
biased_list = [b / total for b in biased_list]  # normalize so the biased PMF sums to 1
resp_numkdhh_pmf_biased = resp_numkdhh_pmf_biased.append(biased_list)
resp_numkdhh_pmf_biased.columns = ['biased']
resp_numkdhh_pmf_biased.plot(kind = 'bar')
resp_numkdhh_pmf_original = pd.DataFrame(resp_numkdhh_pmf_original, columns=['original'])
resp_numkdhh_pmf = resp_numkdhh_pmf_original
resp_numkdhh_pmf['biased'] = resp_numkdhh_pmf_biased.biased
resp_numkdhh_pmf.plot(kind='bar')
resp_numkdhh_pmf
sum([row * index for index, row in resp_numkdhh_pmf.original.iteritems()])
sum([row * index for index, row in resp_numkdhh_pmf.biased.iteritems()])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a PMF of <tt>numkdhh</tt>, the number of children under 18 in the respondent's household.
Step2: Display the PMF.
Step4: Define <tt>BiasPmf</tt>.
Step5: Make the biased Pmf of children in the household, as observed if you surveyed the children instead of the respondents.
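In formula form (the standard length-biased distribution, consistent with BiasPmf above): $p_{\mathrm{biased}}(x) = \dfrac{x \, p(x)}{\sum_{x'} x' \, p(x')}$, so larger households are overrepresented in proportion to their size.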
Step6: Display the actual Pmf and the biased Pmf on the same axes.
Step7: Compute the means of the two Pmfs.
|
4,786
|
<ASSISTANT_TASK:>
Python Code:
import graphlab as gl
from nltk.stem import *
train = gl.SFrame.read_csv("../data/train.csv")
test = gl.SFrame.read_csv("../data/test.csv")
desc = gl.SFrame.read_csv("../data/product_descriptions.csv")
# merge train with description
train = train.join(desc, on = 'product_uid', how = 'left')
# merge test with description
test = test.join(desc, on = 'product_uid', how = 'left')
first_doc = train[0]
first_doc
middle_doc = train[37033]
middle_doc
last_doc = train[-1]
last_doc
train['search_term_word_count'] = gl.text_analytics.count_words(train['search_term'])
ranked3doc = train[train['relevance'] == 3]
print ranked3doc.head()
len(ranked3doc)
words_search = gl.text_analytics.tokenize(ranked3doc['search_term'], to_lower = True)
words_description = gl.text_analytics.tokenize(ranked3doc['product_description'], to_lower = True)
words_title = gl.text_analytics.tokenize(ranked3doc['product_title'], to_lower = True)
wordsdiff_desc = []
wordsdiff_title = []
puid = []
search_term = []
ws_count = []
ws_count_used_desc = []
ws_count_used_title = []
for item in xrange(len(ranked3doc)):
ws = words_search[item]
pd = words_description[item]
pt = words_title[item]
diff = set(ws) - set(pd)
if diff is None:
diff = 0
wordsdiff_desc.append(diff)
diff2 = set(ws) - set(pt)
if diff2 is None:
diff2 = 0
wordsdiff_title.append(diff2)
puid.append(ranked3doc[item]['product_uid'])
search_term.append(ranked3doc[item]['search_term'])
ws_count.append(len(ws))
ws_count_used_desc.append(len(ws) - len(diff))
ws_count_used_title.append(len(ws) - len(diff2))
differences = gl.SFrame({"puid" : puid,
"search term": search_term,
"diff desc" : wordsdiff_desc,
"diff title" : wordsdiff_title,
"ws count" : ws_count,
"ws count used desc" : ws_count_used_desc,
"ws count used title" : ws_count_used_title})
differences.sort(['ws count used desc', 'ws count used title'])
print "No terms used in description : " + str(len(differences[differences['ws count used desc'] == 0]))
print "No terms used in title : " + str(len(differences[differences['ws count used title'] == 0]))
print "No terms used in description and title : " + str(len(differences[(differences['ws count used desc'] == 0) &
(differences['ws count used title'] == 0)]))
import matplotlib.pyplot as plt
%matplotlib inline
#stemmer = SnowballStemmer("english")
stemmer = PorterStemmer()
def stem(word):
singles = [stemmer.stem(plural) for plural in unicode(word, errors='replace').split()]
text = ' '.join(singles)
return text
print "Starting stemming train search term..."
stemmed = train['search_term'].apply(stem)
train['stem_search_term'] = stemmed
print "Starting stemming train product description..."
stemmed = train['product_description'].apply(stem)
train['stem_product_description'] = stemmed
print "Starting stemming train product title..."
stemmed = train['product_title'].apply(stem)
train['stem_product_title'] = stemmed
print "Starting stemming test search term..."
stemmed = test['search_term'].apply(stem)
test['stem_search_term'] = stemmed
print "Starting stemming test product description..."
stemmed = test['product_description'].apply(stem)
test['stem_product_description'] = stemmed
print "Starting stemming test product title..."
stemmed = test['product_title'].apply(stem)
test['stem_product_title'] = stemmed
train['search_term_word_count'] = gl.text_analytics.count_words(train['stem_search_term'])
train_search_tfidf = gl.text_analytics.tf_idf(train['search_term_word_count'])
train['search_tfidf'] = train_search_tfidf
train['product_desc_word_count'] = gl.text_analytics.count_words(train['stem_product_description'])
train_desc_tfidf = gl.text_analytics.tf_idf(train['product_desc_word_count'])
train['desc_tfidf'] = train_desc_tfidf
train['product_title_word_count'] = gl.text_analytics.count_words(train['stem_product_title'])
train_title_tfidf = gl.text_analytics.tf_idf(train['product_title_word_count'])
train['title_tfidf'] = train_title_tfidf
train['distance_desc'] = train.apply(lambda x: gl.distances.cosine(x['search_tfidf'],x['desc_tfidf']))
#train['distance_desc_sqrt'] = train['distance_desc'] ** 2
train['distance_title'] = train.apply(lambda x: gl.distances.cosine(x['search_tfidf'],x['title_tfidf']))
#train['distance_title_sqrt'] = train['distance_title'] ** 3
model1 = gl.linear_regression.create(train, target = 'relevance',
features = ['distance_desc', 'distance_title'],
validation_set = None)
# model1 = gl.linear_regression.create(train, target = 'relevance',
# features = ['distance_desc', 'distance_desc_sqrt', 'distance_title', 'distance_title_sqrt'],
# validation_set = None)
#let's take a look at the weights before we plot
model1.get("coefficients")
test['search_term_word_count'] = gl.text_analytics.count_words(test['stem_search_term'])
test_search_tfidf = gl.text_analytics.tf_idf(test['search_term_word_count'])
test['search_tfidf'] = test_search_tfidf
test['product_desc_word_count'] = gl.text_analytics.count_words(test['stem_product_description'])
test_desc_tfidf = gl.text_analytics.tf_idf(test['product_desc_word_count'])
test['desc_tfidf'] = test_desc_tfidf
test['product_title_word_count'] = gl.text_analytics.count_words(test['stem_product_title'])
test_title_tfidf = gl.text_analytics.tf_idf(test['product_title_word_count'])
test['title_tfidf'] = test_title_tfidf
test['distance_desc'] = test.apply(lambda x: gl.distances.cosine(x['search_tfidf'],x['desc_tfidf']))
#test['distance_desc_sqrt'] = test['distance_desc'] ** 2
test['distance_title'] = test.apply(lambda x: gl.distances.cosine(x['search_tfidf'],x['title_tfidf']))
#test['distance_title_sqrt'] = test['distance_title'] ** 3
'''
predictions_test = model1.predict(test)
test_errors = predictions_test - test['relevance']
RSS_test = sum(test_errors * test_errors)
print RSS_test
'''
predictions_test = model1.predict(test)
predictions_test
submission = gl.SFrame(test['id'])
submission.add_column(predictions_test)
submission.rename({'X1': 'id', 'X2':'relevance'})
submission['relevance'] = submission.apply(lambda x: 3.0 if x['relevance'] > 3.0 else x['relevance'])
submission['relevance'] = submission.apply(lambda x: 1.0 if x['relevance'] < 1.0 else x['relevance'])
submission['relevance'] = submission.apply(lambda x: str(x['relevance']))
submission.export_csv('../data/submission2.csv', quote_level = 3)
#gl.canvas.set_target('ipynb')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data from CSV files
Step2: Data merging
Step3: Let's explore some data
Step4: The search term 'angle bracket' is not contained in the body; after stemming 'angle' would be, but 'bracket' is not.
Step5: only 'wood' is present from search term
Step6: 'sheer' and 'courtain' are present and that's all
Step7: Stemming
Step8: TF-IDF with linear regression
|
4,787
|
<ASSISTANT_TASK:>
Python Code:
a = 6
b = 15
if a < b:
m = a
else:
m = b
m
m = a if a < b else b
m
k = 1
while k < 10**9:
print(k)
k = k * 2
k = 1
n = 0
while k < 10**9:
k = k * 2
n = n + 1
print(n)
k = 1
while True:
k = 2*k
print(k)
for i in range(10):
if i == 7:
continue
print(i)
def syracuse(n):
if n % 2 == 0:
return n // 2
else:
return 3 * n + 1
syracuse(13)
syracuse(40)
n = 1234097125
while n != 1:
n = syracuse(n)
print(n, end=' ')
n = 1234097125
while n != 1:
print(n, end=' ')
n = syracuse(n)
print(n)
n = 12
L = []
for i in range(1, n+1):
reste = n % i
if reste == 0:
L.append(i)
print(L)
from math import sqrt
def diviseurs(n):
L = []
sq = int(sqrt(n))
for i in range(1, sq+1):
reste = n % i
if reste == 0:
L.append(i)
L.append(n//i)
return L
diviseurs(12)
diviseurs(7)
def est_premier(n):
div = diviseurs(n)
return len(div) == 2
est_premier(12)
est_premier(13)
def est_premier_2(n):
sq = int(sqrt(n))
for i in range(2, sq+1):
if n % i == 0:
return False
return True
est_premier_2(12)
est_premier_2(13)
L = [123, 535, 74764, 14379, 56452546, 2356, 3, 4, 8]
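# A minimal sketch for the final section (17.4, twin primes), since the
# original cell stops here; it reuses est_premier_2 from above and is an
# assumed completion, not the author's code:
def jumeaux(n_max):
    # all twin-prime pairs (p, p+2) with p + 2 <= n_max
    return [(p, p + 2) for p in range(2, n_max - 1)
            if est_premier_2(p) and est_premier_2(p + 2)]

jumeaux(50)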
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 15 Functions def
Step2: For a number $x\geq1$, find the unique integer value $n$ satisfying $$2^{n-1} < x < 2^n,$$ that is, the smallest integer $n$ satisfying $x < 2^n$.
Step3: 16.1 Interrupting a loop with break
Step4: 16.2 Skipping to the next iteration of a loop with continue
Step5: 17 Examples (def + while + for + if)
Step6: 17.2 Enumerating the divisors of an integer
Step7: 17.3 Testing whether a number is prime
Step8: 17.4 Finding twin primes
|
4,788
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
DO NOT MODIFY THIS CELL
def fully_connected(prev_layer, num_units):
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
DO NOT MODIFY THIS CELL
def conv_layer(prev_layer, layer_depth):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
return conv_layer
DO NOT MODIFY THIS CELL
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, is_training):
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
layer = tf.layers.batch_normalization(layer, training = is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, is_training):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)
conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
def train(num_batches, batch_size, learning_rate):
is_training = tf.placeholder(tf.bool)
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, is_training)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, is_training)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training: False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels,
is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
is_training: False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units):
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
def conv_layer(prev_layer, layer_depth):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
in_channels = prev_layer.get_shape().as_list()[3]
out_channels = layer_depth*4
weights = tf.Variable(
tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
bias = tf.Variable(tf.zeros(out_channels))
conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
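# A minimal sketch (not the notebook's solution) of the same conv layer with
# low-level batch normalization via tf.nn.batch_normalization; it uses
# training-time batch statistics only, whereas the full exercise also tracks
# population statistics for inference:
def conv_layer_bn(prev_layer, layer_depth):
    strides = 2 if layer_depth % 3 == 0 else 1
    in_channels = prev_layer.get_shape().as_list()[3]
    out_channels = layer_depth*4
    weights = tf.Variable(
        tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
    conv = tf.nn.conv2d(prev_layer, weights, strides=[1, strides, strides, 1], padding='SAME')
    # learned per-channel scale (gamma) and shift (beta); no conv bias needed
    gamma = tf.Variable(tf.ones([out_channels]))
    beta = tf.Variable(tf.zeros([out_channels]))
    # batch statistics over the batch, height and width dimensions
    batch_mean, batch_var = tf.nn.moments(conv, axes=[0, 1, 2])
    conv = tf.nn.batch_normalization(conv, batch_mean, batch_var, beta, gamma, 1e-3)
    return tf.nn.relu(conv)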
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Batch Normalization using tf.layers.batch_normalization
Step6: We'll use the following function to create convolutional layers in our network. They are very basic
Step8: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
Step10: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Step12: TODO
Step13: TODO
Step15: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output
Step17: TODO
Step18: TODO
|
4,789
|
<ASSISTANT_TASK:>
Python Code:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
#plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
plt.imshow(image,cmap='gray')
#whos
#%reset
import math
import code
from itertools import cycle
class Line_with_info:
def __init__(self, x1, y1,x2,y2):
self.x1 = x1
self.y1 = y1
self.x2=x2
self.y2=y2
self.slope=(self.y2-self.y1)/(self.x2-self.x1)
self.slope_angle=math.atan(self.slope)
def get_line(self):
points_data=[[self.x1,self.y1,self.x2,self.y2]];
#code.interact(local=locals())
#Use raise SystemExit
return points_data
def get_slope(self):
return self.slope
def get_slope_angle(self):
return self.slope_angle
def get_distance(x1,y1,x2,y2):
    return math.sqrt(math.pow((x2-x1),2)+math.pow((y2-y1),2))
def get_slope(x1,y1,x2,y2):
return abs(math.atan(abs((y2-y1))/abs((x2-x1))))
def grayscale(img):
Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
Applies the Canny transform
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
Applies a Gaussian Noise kernel
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines,y_min=350,y_max=550, color=[255, 0, 0], thickness=12,theta_tolerance_deg=3,angle_upper_limit_deg=40,angle_lower_limit=25):
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
#for line in lines:
#for x1,y1,x2,y2 in line:
#cv2.line(img, (x1, y1), (x2, y2), [0,255,0], thickness)
#Angle tolerance used to divide lines into groups
theta_tolerance=math.radians(theta_tolerance_deg)
    #Angle limits of the lanes (absolute); use the function's degree parameters
    angle_upper_limit=math.radians(angle_upper_limit_deg)
    angle_lower_limit=math.radians(angle_lower_limit)
#Wrap lines in objects and sort them
lines_objects=[]
for line in lines:
for x1,y1,x2,y2 in line:
g=Line_with_info(x1,y1,x2,y2)
lines_objects.append(Line_with_info(x1,y1,x2,y2))
#print("Slope %s"%math.degrees(g.get_slope()))
#print("Coordinates %s,%s,%s,%s"%(x1,y1,x2,y2))
sorted_lines=sorted(lines_objects, key=lambda Line_with_info: Line_with_info.get_slope_angle())
#Initialize vars
first_lane_group=[]
second_lane_group=[]
sorted_left_lane=[]
sorted_right_lane=[]
first_group_done=False
second_group_done=False
bnd_x_min_left=0
bnd_x_max_left=0
bnd_x_min_right=0
bnd_x_max_right=0
Left_lane_used=False
Right_lane_used=False
#Divide lanes into two groups based on slope
for i,asc_line in enumerate(sorted_lines[:-1]):
if not(first_group_done):
if (abs(sorted_lines[i+1].slope_angle-asc_line.slope_angle)<theta_tolerance):
first_lane_group.append(asc_line)
first_lane_group.append(sorted_lines[i+1])
#print("avg_slope_1 %s"%asc_line.slope_angle)
#print("avg_slope_2 %s"%sorted_lines[i+1].slope_angle)
#print("Coordinates 1 %s,%s,%s,%s"%(asc_line.x1,asc_line.y1,asc_line.x2,asc_line.y2))
#print("Coordinates 2 %s,%s,%s,%s"%(sorted_lines[i+1].x1,sorted_lines[i+1].y1,sorted_lines[i+1].x2,sorted_lines[i+1].y2))
else:
#print("Pass")
if (len(first_lane_group)>0):
#print("Done first group")
first_group_done=True
elif not(second_group_done):
if (abs(sorted_lines[i+1].slope_angle-asc_line.slope_angle)<theta_tolerance):
second_lane_group.append(asc_line)
second_lane_group.append(sorted_lines[i+1])
#print("avg_slope_1 %s"%asc_line.slope_angle)
#print("avg_slope_2 %s"%sorted_lines[i+1].slope_angle)
#print("Coordinates 1 %s,%s,%s,%s"%(asc_line.x1,asc_line.y1,asc_line.x2,asc_line.y2))
#print("Coordinates 2 %s,%s,%s,%s"%(sorted_lines[i+1].x1,sorted_lines[i+1].y1,sorted_lines[i+1].x2,sorted_lines[i+1].y2))
else:
#print("Pass")
if (len(second_lane_group)>0):
#print("Done Second group")
second_group_done=True
#Sort lines in groups
sorted_first_lines=sorted(first_lane_group, key=lambda Line_with_info: Line_with_info.x1)
sorted_second_lines=sorted(second_lane_group, key=lambda Line_with_info: Line_with_info.x1)
#Identify left-lane group and right lane group
if (len(first_lane_group)>0):
avg_slope_1=sum(fst_lane_grp.get_slope() for fst_lane_grp in first_lane_group)/float(len(first_lane_group))
if (avg_slope_1<0):
sorted_left_lane=first_lane_group
else:
sorted_right_lane=first_lane_group
if (len(sorted_left_lane)>0):
if (len(second_lane_group)>0):
sorted_right_lane=sorted_second_lines
#Average slopes and position. Use linear extrapolation to draw a line across
if (len(sorted_left_lane)>0):
#print("first_lane_group")
avg_slope_left=sum(left_lane_grp.get_slope() for left_lane_grp in sorted_left_lane)/float(len(sorted_left_lane))
gh=math.atan(avg_slope_left)
if (abs(gh)>angle_upper_limit)or(abs(gh)<angle_lower_limit):
cv2.line(img, (draw_lines.prev_coord_left[0], y_max), (draw_lines.prev_coord_left[1], y_min), color, thickness)
else:
avg_x1_left=sum(left_lane_grp.x1 for left_lane_grp in sorted_left_lane)/float(len(sorted_left_lane))
avg_y1_left=sum(left_lane_grp.y1 for left_lane_grp in sorted_left_lane)/float(len(sorted_left_lane))
if (abs(avg_slope_left)<0.0001):
bnd_x_min_left=y_min
bnd_x_max_left=y_max
else:
bnd_x_min_left=-(1/avg_slope_left)*(avg_y1_left-y_min)+avg_x1_left
bnd_x_max_left=-(1/avg_slope_left)*(avg_y1_left-y_max)+avg_x1_left
#print("avg_slope_1 %s"%avg_slope_1)
#print("avg_x1_1 %s"%avg_x1_1)
#print("avg_y1_1 %s"%avg_y1_1)
#gh=math.degrees(math.atan(avg_slope_left))
#print("Angle Left %s"%gh)
Left_lane_used=True
cv2.line(img, (int(bnd_x_max_left), y_max), (int(bnd_x_min_left), y_min), color, thickness)
else:
#print("Left from memory")
cv2.line(img, (draw_lines.prev_coord_left[0], y_max), (draw_lines.prev_coord_left[1], y_min), color, thickness)
#Average slopes and position. Use linear extrapolation to draw a line across
if (len(sorted_right_lane)>0):
#print("second_lane_group")
avg_slope_right=sum(right_lane_grp.get_slope() for right_lane_grp in sorted_right_lane)/float(len(sorted_right_lane))
gh=math.atan(avg_slope_right)
if (abs(gh)>angle_upper_limit)or(abs(gh)<angle_lower_limit):
cv2.line(img, (draw_lines.prev_coord_right[0], y_max), (draw_lines.prev_coord_right[1], y_min), color, thickness)
else:
avg_x1_right=sum(right_lane_grp.x1 for right_lane_grp in sorted_right_lane)/float(len(sorted_right_lane))
avg_y1_right=sum(right_lane_grp.y1 for right_lane_grp in sorted_right_lane)/float(len(sorted_right_lane))
if (abs(avg_slope_right)<0.0001):
bnd_x_min_right=y_min
bnd_x_max_right=y_max
else:
bnd_x_min_right=-(1/avg_slope_right)*(avg_y1_right-y_min)+avg_x1_right
bnd_x_max_right=-(1/avg_slope_right)*(avg_y1_right-y_max)+avg_x1_right
#print("avg_slope_2 %s"%avg_slope_2)
#print("avg_x1_2 %s"%avg_x1_2)
#print("avg_y1_2 %s"%avg_y1_2)
#print("Angle Right %s"%gh)
Right_lane_used=True
cv2.line(img, (int(bnd_x_max_right), y_max), (int(bnd_x_min_right), y_min), color, thickness)
else:
#print("Right from memory")
cv2.line(img, (draw_lines.prev_coord_right[0], y_max), (draw_lines.prev_coord_right[1], y_min), color, thickness)
#Decide weather to update memory lanes or not
if (len(sorted_left_lane)>0)and Left_lane_used:
draw_lines.prev_slope_left=avg_slope_left
draw_lines.prev_coord_left=[int(bnd_x_max_left),int(bnd_x_min_left)]
if (len(sorted_right_lane)>0) and Right_lane_used:
draw_lines.prev_slope_right=avg_slope_right
draw_lines.prev_coord_right=[int(bnd_x_max_right),int(bnd_x_min_right)]
draw_lines.prev_coord_left=[0,0]
draw_lines.prev_coord_right=[0,0]
draw_lines.prev_slope_left=0
draw_lines.prev_slope_right=0
draw_lines.counter=0
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap,y_min=350,y_max=550):
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
#Step6:
draw_lines(line_img, lines,y_min,y_max)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
return cv2.addWeighted(initial_img, α, img, β, λ)
import os
os.listdir("test_images/")
import math
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images directory.
#Step1: Graying
image = mpimg.imread('test_images/solidWhiteRight.jpg')
image_gray_unfiltered=grayscale(image)
#Step2: Filteration
imshape=image_gray_unfiltered.shape
kernel_size = 5
blur_gray = gaussian_blur(image_gray_unfiltered,kernel_size)
#Step3: Edge detection
low_threshold = 50
high_threshold = 120
edges = canny(blur_gray, low_threshold, high_threshold) #returns array edges same size of image_gray
#Step4: Region masking
low_y_limit=320
vertices = np.array([[(50,imshape[0]),(450, low_y_limit), (500, low_y_limit), (imshape[1],imshape[0])]], dtype=np.int32)
masked_edges=region_of_interest(edges,vertices)
#Step5: Line detection
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 40 #minimum number of pixels making up a line
max_line_gap = 10 # maximum gap in pixels between connectable line segments
line_image=hough_lines(masked_edges,rho, theta, threshold,min_line_length,max_line_gap,low_y_limit,imshape[0])
#Displaying
color_edges = np.dstack((edges, edges, edges))
lines_edges = weighted_img(line_image, color_edges, 0.8, 1, 0)
f, axarr = plt.subplots(2,figsize=(15,15))
axarr[0].imshow(image, cmap='gray')
axarr[1].imshow(lines_edges)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image with lines are drawn on lanes)
image_gray_unfiltered=grayscale(image)
kernel_size = 5
blur_gray = gaussian_blur(image_gray_unfiltered,kernel_size)
low_threshold = 50
high_threshold = 120
edges = canny(blur_gray, low_threshold, high_threshold) #returns array edges same size of image_gray
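    # note: 'vertices' is the global region-of-interest polygon defined in the earlier test-image cell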
masked_edges=region_of_interest(edges,vertices)
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 40 #minimum number of pixels making up a line
max_line_gap = 10 # maximum gap in pixels between connectable line segments
line_image=hough_lines(masked_edges,rho, theta, threshold,min_line_length,max_line_gap)
color_edges = np.dstack((edges, edges, edges))
lines_edges = weighted_img(line_image, image, 0.8, 1, 0)
return lines_edges
white_output = 'solidWhiteRight-Result.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(white_output))
yellow_output = 'solidYellowLeft-Result.mp4'
clip2 = VideoFileClip('solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(yellow_output))
challenge_output = 'extra.mp4'
clip2 = VideoFileClip('challenge.mp4')
challenge_clip = clip2.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(challenge_output))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step8: Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are
Step9: Test on Images
Step10: Run your solution on all test_images and make copies into the test_images directory.
Step11: Test on Videos
Step12: Let's try the one with the solid white lane on the right first ...
Step14: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
Step16: At this point, if you were successful you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. Modify your draw_lines function accordingly and try re-running your pipeline.
Step18: Reflections
|
4,790
|
<ASSISTANT_TASK:>
Python Code:
import Bio.Blast.NCBIWWW as BBNW
import Bio.Seq as BS
import Bio.Alphabet as BA
# BLAST program to use
prog = "blastp"
# database to search against
database = "swissprot"
# query sequence as a Seq object
query = BS.Seq("IRVEGNLRVEYLDDRNTFRHSVVVPYEPPE",
alphabet=BA.IUPAC.protein)
# run NCBI BLAST
handle = BBNW.qblast(prog, database, query)
# save to file
# (particularly useful with BLAST, which is slow to run)
with open('blast-results.xml', 'w') as f:
f.write(handle.read())
# BLAST program to use
prog = "blastp"
# database to search against
database = "swissprot"
# query sequences as a list of IDs
query = ['P01013', 'P12345']
# NCBI BLAST expects IDs as a string with one ID per line
query = "\n".join(query)
# run NCBI BLAST
handle = BBNW.qblast(prog, database, query)
# save to file
# (particularly useful with BLAST, which is slow to run)
with open('blast-results-many.xml', 'w') as f:
f.write(handle.read())
import Bio.SearchIO as BSIO
# parse the result into a QueryResult object
with open('blast-results.xml') as f:
# remember to specify the file format (here 'blast-xml')
result = BSIO.read(f, 'blast-xml')
# a single QueryResult
print(result)
# basic information about the search program
print(result.program)
print(result.version)
print()
# basic information about the query
print(result.id)
print(result.description)
# (query sequence length)
print(result.seq_len)
# (target database)
print(result.target)
# (substitution matrix)
print(result.param_matrix)
print()
# all available pieces of information (ignore keys starting with underscore)
for k in result.__dict__.keys():
print(k)
# parse the results into QueryResult objects
with open('blast-results-many.xml') as f:
# one QueryResult per query sequence
for r in BSIO.parse(f, 'blast-xml'):
print(r, end='\n\n')
# number of hits
print(len(result), end='\n\n')
# select the first three hits
# (returns a new QueryResult object)
top_result = result[:3]
# iterate over hits
for hit in top_result:
print(hit, end='\n\n')
# iterate over the hit keys (i.e. their ids)
for uid in top_result.hit_keys:
print(uid)
print()
# hit to specific database entry
print(top_result['sp|Q64662.1|'], end='\n\n')
# does entry exist among the hits?
print('sp|Q64662.1|' in top_result, end='\n\n')
# function to sort by sequence length
fn = lambda hit: hit.seq_len
# sort by sequence length in reverse order
# and produce a new QueryResult object (in_place=False)
sorted_result = result.sort(key=fn, reverse=True, in_place=False)
# original top-3 ids
print(result.hit_keys[:3], end='\n\n')
# sorted top-3 ids
print(sorted_result.hit_keys[:3], end='\n\n')
# filter function to get hits with sequence length > 500
fn = lambda hit: hit.seq_len > 500
# filter hits and produce a new QueryResult object
filtered_result = result.hit_filter(fn)
# number of original hits
print(len(result), end='\n\n')
# number of filtered hits
print(len(filtered_result), end='\n\n')
# (use the first hit as an example)
hit = result[0]
print(hit, end='\n\n')
# basic information about the query
print(hit.query_id)
print(hit.query_description)
print()
# basic information about the hit entry
# (these are about the database entry, not the query sequence)
print(hit.id)
print(hit.description)
print(hit.seq_len)
print(hit.accession)
print()
# all available pieces of information (ignore keys starting with underscore)
for k in hit.__dict__.keys():
print(k)
# number of HSPs
print(len(hit), end='\n\n')
# select the first three HSPs (there is only one HSP in this case)
# (returns a new Hit object)
top_hsp = hit[:3]
# iterate over HSPs
for hsp in hit:
print(hsp, end='\n\n')
# (notice that, unlike Hits, HSPs do not have keys
# by which you could access them)
# function to sort by E-value
fn = lambda hsp: hsp.evalue
# sort by E-value in reverse order
# and produce a new Hit object (in_place=False)
sorted_hit = hit.sort(key=fn, reverse=True, in_place=False)
# original HSPs
print(hit, end='\n\n')
# sorted HSPs
print(sorted_hit, end='\n\n')
# filter function to get HSPs with E-value < 10^-6
fn = lambda hsp: hsp.evalue < 0.000001
# filter HSPs and produce a new Hit object
filtered_hit = hit.filter(fn)
# number of original hits
print(len(hit), end='\n\n')
# number of filtered hits
print(len(filtered_hit), end='\n\n')
# filter function to get HSPs with E-value < 10^-6
fn = lambda hsp: hsp.evalue < 0.000001
# filter hits and produce a new QueryResult object
hspfiltered_result = result.hsp_filter(fn)
# number of original hits
print(len(result), end='\n\n')
# E-values in original hits
for e in sorted([hsp.evalue for hsp in hit]
for hit in result):
print(e)
print()
# number of filtered hits
print(len(hspfiltered_result), end='\n\n')
# E-values in filtered hits
for e in sorted([hsp.evalue for hsp in hit]
for hit in hspfiltered_result):
print(e)
print()
# (use the first HSP as an example)
hsp = hit[0]
print(hsp, end='\n\n')
# basic information about the query sequence
print(hsp.query_id)
print(hsp.query_description)
print()
# basic information about the hit entry
print(hsp.hit_id)
print(hsp.hit_description)
print()
# matched query and hit sequence segments as Seq objects
print(hsp.query, end='\n\n')
print(hsp.hit, end='\n\n')
# start and end positions of the match within the query sequence
print(hsp.query_start)
print(hsp.query_end)
print()
# start and end positions of the match within the hit sequence
print(hsp.hit_start)
print(hsp.hit_end)
print()
# the alignment of the matched segments
print(hsp.aln)
# basic information about the match
# (E-value)
print(hsp.evalue)
# (bit score)
print(hsp.bitscore)
# (number of identical residues in alignment)
print(hsp.ident_num)
# (number of positive residues in alignment)
print(hsp.pos_num)
# (number of gaps in alignment)
print(hsp.gap_num)
print()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The NCBI BLAST will accept several query sequences simultaneously. In fact, it is preferred to send all query sequences at once, if possible.
Step2: As with the UniProt API, the qblast function reflects the behaviour of the corresponding website. It is therefore a good idea to take advantage of the graphical interface when designing the analysis and debugging the code. The NCBI BLAST and its descriptions are available at https
Step3: The parse function will parse and iterate over several query results.
Step4: QueryResult objects behave like lists and dictionaries
Step5: The hits can be sorted in-place or into a new QueryResult object. Like with the built-in sort function in Python, you can specify the sort order by supplying the sort function. The hits can also be filtered much like the built-in filter function in Python.
Step6: Hit objects contain the details of single database entries
Step7: HSP objects can also be filtered directly from QueryResult objects. This is convenient because you may need to filter your hits based on the values within their HSPs.
Step8: HSPs contain the details of the matched segments of sequences
|
4,791
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
from cobra import Model, Reaction, Metabolite
# Best practice: SBML compliant IDs
model = Model('example_model')
reaction = Reaction('3OAS140')
reaction.name = '3 oxoacyl acyl carrier protein synthase n C140 '
reaction.subsystem = 'Cell Envelope Biosynthesis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
ACP_c = Metabolite(
'ACP_c',
formula='C11H21N2O7PRS',
name='acyl-carrier-protein',
compartment='c')
omrsACP_c = Metabolite(
'3omrsACP_c',
formula='C25H45N2O9PRS',
name='3-Oxotetradecanoyl-acyl-carrier-protein',
compartment='c')
co2_c = Metabolite('co2_c', formula='CO2', name='CO2', compartment='c')
malACP_c = Metabolite(
'malACP_c',
formula='C14H22N2O10PRS',
name='Malonyl-acyl-carrier-protein',
compartment='c')
h_c = Metabolite('h_c', formula='H', name='H', compartment='c')
ddcaACP_c = Metabolite(
'ddcaACP_c',
formula='C23H43N2O8PRS',
name='Dodecanoyl-ACP-n-C120ACP',
compartment='c')
reaction.add_metabolites({
malACP_c: -1.0,
h_c: -1.0,
ddcaACP_c: -1.0,
co2_c: 1.0,
ACP_c: 1.0,
omrsACP_c: 1.0
})
reaction.reaction # This gives a string representation of the reaction
reaction.gene_reaction_rule = '( STM2378 or STM1197 )'
reaction.genes
print('%i reactions initially' % len(model.reactions))
print('%i metabolites initially' % len(model.metabolites))
print('%i genes initially' % len(model.genes))
model.add_reactions([reaction])
# Now there are things in the model
print('%i reaction' % len(model.reactions))
print('%i metabolites' % len(model.metabolites))
print('%i genes' % len(model.genes))
# Iterate through the the objects in the model
print("Reactions")
print("---------")
for x in model.reactions:
print("%s : %s" % (x.id, x.reaction))
print("")
print("Metabolites")
print("-----------")
for x in model.metabolites:
print('%9s : %s' % (x.id, x.formula))
print("")
print("Genes")
print("-----")
for x in model.genes:
associated_ids = (i.id for i in x.reactions)
print("%s is associated with reactions: %s" %
(x.id, "{" + ", ".join(associated_ids) + "}"))
model.objective = '3OAS140'
print(model.objective.expression)
print(model.objective.direction)
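# A quick sketch (not in the original cell): now that the model is populated,
# existing metabolites can be fetched by id instead of being reconstructed:
acp = model.metabolites.get_by_id('ACP_c')
print(acp.name)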
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We need to create metabolites as well. If we were using an existing model, we could use Model.get_by_id to get the appropriate Metabolite objects instead.
Step2: Adding metabolites to a reaction requires using a dictionary of the metabolites and their stoichiometric coefficients. A group of metabolites can be added all at once, or they can be added one at a time.
Step3: The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in Schellenberger et al 2011 Nature Protocols 6(9)
Step4: At this point in time, the model is still empty
Step5: We will add the reaction to the model, which will also add all associated metabolites and genes
Step6: We can iterate through the model objects to observe the contents
Step7: Last we need to set the objective of the model. Here, we just want this to be the maximization of the flux in the single reaction we added and we do this by assigning the reaction's identifier to the objective property of the model.
Step8: The created objective is a symbolic algebraic expression and we can examine it by printing it
|
4,792
|
<ASSISTANT_TASK:>
Python Code:
from pandas import read_csv
srooms_df = read_csv('../data/agaricus-lepiota.data.csv')
from sklearn_pandas import DataFrameMapper
import sklearn
import numpy as np
mappings = ([
('edibility', sklearn.preprocessing.LabelEncoder()),
('odor', sklearn.preprocessing.LabelBinarizer()),
('habitat', sklearn.preprocessing.LabelBinarizer()),
('spore-print-color', sklearn.preprocessing.LabelBinarizer())
])
mapper = DataFrameMapper(mappings)
srooms_np = mapper.fit_transform(srooms_df.copy()).astype(np.float32)
from sklearn.model_selection import train_test_split
train, test = train_test_split(srooms_np, test_size = 0.2, random_state=7)
train_labels = train[:,0:1]
train_data = train[:,1:]
test_labels = test[:,0:1]
test_data = test[:,1:]
import tensorflow as tf
import math
def inference(samples, input_dim, dense1_units, dense2_units):
with tf.name_scope('dense_1'):
weights = tf.Variable(
tf.truncated_normal([input_dim, dense1_units],
stddev=1.0 / math.sqrt(float(input_dim))),
name='weights')
biases = tf.Variable(tf.zeros([dense1_units]),
name='biases')
dense1 = tf.nn.relu(tf.nn.xw_plus_b(samples, weights, biases))
with tf.name_scope('dropout'):
dropout = tf.nn.dropout(dense1, 0.5)
with tf.name_scope('dense_2'):
weights = tf.Variable(
tf.truncated_normal([dense1_units, dense2_units],
stddev=1.0 / math.sqrt(float(dense2_units))),
name='weights')
biases = tf.Variable(tf.zeros([dense2_units]),
name='biases')
output = tf.sigmoid(tf.nn.xw_plus_b(dropout, weights, biases))
return output
def loss(output, labels, from_logits=False):
if not from_logits:
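        # output holds sigmoid probabilities; clip away exact 0/1 and invert
        # the sigmoid (log-odds) so the stable logits-based loss below applies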
epsilon = 10e-8
output = tf.clip_by_value(output, epsilon, 1 - epsilon)
output = tf.log(output / (1 - output))
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=output)
return tf.reduce_mean(xentropy)
def training(loss):
tf.summary.scalar('loss', loss)
optimizer = tf.train.AdamOptimizer()
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
def predict(output):
return tf.round(output)
def accuracy(output, labels):
return tf.reduce_mean(tf.to_float(tf.equal(predict(output),labels)))
import time
log_dir = './logs/tensor_srooms'
num_epochs=10
batch_size=64
with tf.Graph().as_default():
with tf.name_scope('input'):
features_initializer = tf.placeholder(dtype=tf.float32, shape=train_data.shape)
labels_initializer = tf.placeholder(dtype=tf.float32, shape=train_labels.shape)
input_features = tf.Variable(features_initializer, trainable=False, collections=[])
input_labels = tf.Variable(labels_initializer, trainable=False, collections=[])
# Shuffle the training data between epochs and train in batchs
feature, label = tf.train.slice_input_producer([input_features, input_labels], num_epochs=num_epochs)
features, labels = tf.train.batch([feature, label], batch_size=batch_size)
# Define layers dimensions
output = inference(features, 25, 20, 1)
loss_op = loss(output, labels)
train_op = training(loss_op)
# Define the metrics op
acc_op = accuracy(predict(output), labels)
# Initialize all variables op
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
summary_op = tf.summary.merge_all()
# Saver for the weights
saver = tf.train.Saver()
print('create saver')
# Start Session
sess = tf.Session()
sess.run(init_op)
print('session started')
# Load up the data.
sess.run(input_features.initializer, feed_dict={features_initializer: train_data})
sess.run(input_labels.initializer, feed_dict={labels_initializer: train_labels})
print('loaded data')
# Write the summary for tensorboard
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
# coordinate reading threads
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model.
_, loss_value, acc_value = sess.run([train_op, loss_op, acc_op])
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f, acc = %.3f (%.3f sec)' % (step, loss_value, acc_value, duration))
# Update the events file.
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
step += 1
except tf.errors.OutOfRangeError:
print('Saving')
saver.save(sess, log_dir, global_step=step)
print('Done training for %d epochs, %d steps.' % (num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Definition
Step2: Model Compile
Step3: Training
|
4,793
|
<ASSISTANT_TASK:>
Python Code:
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
import shogun as sg
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# use scipy for generating samples
from scipy.stats import laplace, norm
def sample_gaussian_vs_laplace(n=220, mu=0.0, sigma2=1, b=np.sqrt(0.5)):
# sample from both distributions
X=norm.rvs(size=n)*np.sqrt(sigma2)+mu
Y=laplace.rvs(size=n, loc=mu, scale=b)
return X,Y
mu=0.0
sigma2=1
b=np.sqrt(0.5)
n=220
X,Y=sample_gaussian_vs_laplace(n, mu, sigma2, b)
# plot both densities and histograms
plt.figure(figsize=(18,5))
plt.suptitle("Gaussian vs. Laplace")
plt.subplot(121)
Xs=np.linspace(-2, 2, 500)
plt.plot(Xs, norm.pdf(Xs, loc=mu, scale=sigma2))
plt.plot(Xs, laplace.pdf(Xs, loc=mu, scale=b))
plt.title("Densities")
plt.xlabel("$x$")
plt.ylabel("$p(x)$")
plt.subplot(122)
plt.hist(X, alpha=0.5)
plt.xlim([-5,5])
plt.ylim([0,100])
plt.hist(Y,alpha=0.5)
plt.xlim([-5,5])
plt.ylim([0,100])
plt.legend(["Gaussian", "Laplace"])
plt.title('Samples');
print("Gaussian vs. Laplace")
print("Sample means: %.2f vs %.2f" % (np.mean(X), np.mean(Y)))
print("Samples variances: %.2f vs %.2f" % (np.var(X), np.var(Y)))
# turn data into Shogun representation (column vectors)
feat_p=sg.create_features(X.reshape(1,len(X)))
feat_q=sg.create_features(Y.reshape(1,len(Y)))
# choose kernel for testing. Here: Gaussian
kernel_width=1
kernel=sg.create_kernel("GaussianKernel", width=kernel_width)
# create mmd instance of test-statistic
mmd=sg.QuadraticTimeMMD()
mmd.set_kernel(kernel)
mmd.set_p(feat_p)
mmd.set_q(feat_q)
# compute biased and unbiased test statistic (default is unbiased)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
biased_statistic=mmd.compute_statistic()
mmd.set_statistic_type(sg.ST_UNBIASED_FULL)
statistic=unbiased_statistic=mmd.compute_statistic()
print("%d x MMD_b[X,Y]^2=%.2f" % (len(X), biased_statistic))
print("%d x MMD_u[X,Y]^2=%.2f" % (len(X), unbiased_statistic))
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(200)
# now show a couple of ways to compute the test
# compute p-value for computed test statistic
p_value=mmd.compute_p_value(statistic)
print("P-value of MMD value %.2f is %.2f" % (statistic, p_value))
# compute threshold for rejecting H_0 for a given test power
alpha=0.05
threshold=mmd.compute_threshold(alpha)
print("Threshold for rejecting H0 with a test power of %.2f is %.2f" % (alpha, threshold))
# performing the test by hand given the above results, note that those two are equivalent
if statistic>threshold:
print("H0 is rejected with confidence %.2f" % alpha)
if p_value<alpha:
print("H0 is rejected with confidence %.2f" % alpha)
# or, compute the full two-sample test directly
# fixed test power, binary decision
binary_test_result=mmd.perform_test(alpha)
if binary_test_result:
print("H0 is rejected with confidence %.2f" % alpha)
num_samples=500
# sample null distribution
null_samples=mmd.sample_null()
# sample alternative distribution, generate new data for that
alt_samples=np.zeros(num_samples)
for i in range(num_samples):
X=norm.rvs(size=n, loc=mu, scale=sigma2)
Y=laplace.rvs(size=n, loc=mu, scale=b)
feat_p=sg.create_features(np.reshape(X, (1,len(X))))
feat_q=sg.create_features(np.reshape(Y, (1,len(Y))))
# TODO: reset pre-computed kernel here
mmd.set_p(feat_p)
mmd.set_q(feat_q)
alt_samples[i]=mmd.compute_statistic()
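# spread of the sampled alternative distribution (shown as the cell output)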
np.std(alt_samples)
def plot_alt_vs_null(alt_samples, null_samples, alpha):
plt.figure(figsize=(18,5))
plt.subplot(131)
plt.hist(null_samples, 50, color='blue')
plt.title('Null distribution')
plt.subplot(132)
plt.title('Alternative distribution')
plt.hist(alt_samples, 50, color='green')
plt.subplot(133)
plt.hist(null_samples, 50, color='blue')
plt.hist(alt_samples, 50, color='green', alpha=0.5)
    plt.title('Null and alternative distribution')
# find (1-alpha) element of null distribution
null_samples_sorted=np.sort(null_samples)
quantile_idx=int(len(null_samples)*(1-alpha))
quantile=null_samples_sorted[quantile_idx]
plt.axvline(x=quantile, ymin=0, ymax=100, color='red', label=str(int(round((1-alpha)*100))) + '% quantile of null')
plt.legend();
plot_alt_vs_null(alt_samples, null_samples, alpha)
# optional: plot spectrum of joint kernel matrix
# TODO: it would be good if there was a way to extract the joint kernel matrix for all kernel tests
# get joint feature object and compute kernel matrix and its spectrum
feats_p_q=mmd.get_p_and_q()
sg.as_kernel(mmd.get("kernel")).init(feats_p_q, feats_p_q)
K=sg.as_kernel(mmd.get("kernel")).get_kernel_matrix()
w,_=np.linalg.eig(K)
# visualise K and its spectrum (only up to threshold)
plt.figure(figsize=(18,5))
plt.subplot(121)
plt.imshow(K, interpolation="nearest")
plt.title("Kernel matrix K of joint data $X$ and $Y$")
plt.subplot(122)
thresh=0.1
plt.plot(w[:len(w[w>thresh])])
plt.title("Eigenspectrum of K until component %d" % len(w[w>thresh]));
# threshold for eigenspectrum
thresh=0.1
# compute number of eigenvalues to use
num_eigen=len(w[w>thresh])
# finally, do the test, use biased statistic
mmd.set_statistic_type(sg.ST_BIASED_FULL)
#tell Shogun to use spectrum approximation
mmd.set_null_approximation_method(sg.NAM_MMD2_SPECTRUM)
mmd.spectrum_set_num_eigenvalues(num_eigen)
mmd.set_num_null_samples(num_samples)
# the usual test interface
statistic=mmd.compute_statistic()
p_value_spectrum=mmd.compute_p_value(statistic)
print("Spectrum: P-value of MMD test is %.2f" % p_value_spectrum)
# compare with ground truth from permutation test
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
p_value_permutation=mmd.compute_p_value(statistic)
print("Bootstrapping: P-value of MMD test is %.2f" % p_value_permutation)
# tell Shogun to use gamma approximation
mmd.set_null_approximation_method(sg.NAM_MMD2_GAMMA)
# the usual test interface
statistic=mmd.compute_statistic()
p_value_gamma=mmd.compute_p_value(statistic)
print("Gamma: P-value of MMD test is %.2f" % p_value_gamma)
# compare with ground truth bootstrapping
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
p_value_spectrum=mmd.compute_p_value(statistic)
print("Bootstrapping: P-value of MMD test is %.2f" % p_value_spectrum)
# type I error is false alarm, therefore sample data under H0
num_trials=50
rejections_gamma=np.zeros(num_trials)
rejections_spectrum=np.zeros(num_trials)
rejections_bootstrap=np.zeros(num_trials)
num_samples=50
alpha=0.05
for i in range(num_trials):
X=norm.rvs(size=n, loc=mu, scale=sigma2)
Y=laplace.rvs(size=n, loc=mu, scale=b)
    # simulate H0 by merging the samples before computing the statistic
Z=np.hstack((X,Y))
X=Z[:len(X)]
Y=Z[len(X):]
feat_p=sg.create_features(np.reshape(X, (1,len(X))))
feat_q=sg.create_features(np.reshape(Y, (1,len(Y))))
# gamma
mmd=sg.QuadraticTimeMMD()
mmd.set_p(feat_p)
mmd.set_q(feat_q)
mmd.set_kernel(kernel)
mmd.set_null_approximation_method(sg.NAM_MMD2_GAMMA)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_gamma[i]=mmd.perform_test(alpha)
# spectrum
mmd=sg.QuadraticTimeMMD()
mmd.set_p(feat_p)
mmd.set_q(feat_q)
mmd.set_kernel(kernel)
mmd.set_null_approximation_method(sg.NAM_MMD2_SPECTRUM)
mmd.spectrum_set_num_eigenvalues(num_eigen)
mmd.set_num_null_samples(num_samples)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_spectrum[i]=mmd.perform_test(alpha)
# bootstrap (precompute kernel)
mmd=sg.QuadraticTimeMMD()
mmd.set_p(feat_p)
mmd.set_q(feat_q)
p_and_q=mmd.get_p_and_q()
kernel.init(p_and_q, p_and_q)
precomputed_kernel=sg.CustomKernel(kernel)
mmd.set_kernel(precomputed_kernel)
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
mmd.set_statistic_type(sg.ST_BIASED_FULL)
rejections_bootstrap[i]=mmd.perform_test(alpha)
convergence_gamma=np.cumsum(rejections_gamma)/(np.arange(num_trials)+1)
convergence_spectrum=np.cumsum(rejections_spectrum)/(np.arange(num_trials)+1)
convergence_bootstrap=np.cumsum(rejections_bootstrap)/(np.arange(num_trials)+1)
print("Average rejection rate of H0 for Gamma is %.2f" % np.mean(convergence_gamma))
print("Average rejection rate of H0 for Spectrum is %.2f" % np.mean(convergence_spectrum))
print("Average rejection rate of H0 for Bootstrapping is %.2f" % np.mean(rejections_bootstrap))
# parameters of the dataset
m=20000
distance=10
stretch=5
num_blobs=3
angle=np.pi/4
# these are streaming features
gen_p=sg.GaussianBlobsDataGenerator(num_blobs, distance, 1, 0)
gen_q=sg.GaussianBlobsDataGenerator(num_blobs, distance, stretch, angle)
# stream some data and plot
num_plot=1000
features=gen_p.get_streamed_features(num_plot)
features=features.create_merged_copy(gen_q.get_streamed_features(num_plot))
data=features.get("feature_matrix")
plt.figure(figsize=(18,5))
plt.subplot(121)
plt.grid(True)
plt.plot(data[0][0:num_plot], data[1][0:num_plot], 'r.', label='$x$')
plt.title('$X\sim p$')
plt.subplot(122)
plt.grid(True)
plt.plot(data[0][num_plot:2*num_plot], data[1][num_plot:2*num_plot], 'b.', label='$y$', alpha=0.5)
plt.title('$Y\sim q$')
plt.show()
block_size=100
# if features are already under the streaming interface, just pass them
mmd=sg.LinearTimeMMD()
mmd.set_p(gen_p)
mmd.set_q(gen_q)
mmd.set_kernel(kernel)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
# compute an unbiased estimate in linear time
statistic=mmd.compute_statistic()
print("MMD_l[X,Y]^2=%.2f" % statistic)
# note: due to the streaming nature, successive calls of compute statistic use different data
# and produce different results. Data cannot be stored in memory
for _ in range(5):
print("MMD_l[X,Y]^2=%.2f" % mmd.compute_statistic())
# data source
gen_p=sg.GaussianBlobsDataGenerator(num_blobs, distance, 1, 0)
gen_q=sg.GaussianBlobsDataGenerator(num_blobs, distance, stretch, angle)
num_samples=100
print("Number of data is %d" % num_samples)
# retrieve some points, store them as non-streaming data in memory
data_p=gen_p.get_streamed_features(num_samples)
data_q=gen_q.get_streamed_features(num_samples)
# example to create mmd (note that num_samples can be maximum the number of data in memory)
mmd=sg.LinearTimeMMD()
mmd.set_p(data_p)
mmd.set_q(data_q)
mmd.set_kernel(sg.create_kernel("GaussianKernel", width=1))
mmd.set_num_blocks_per_burst(100)
print("Linear time MMD statistic: %.2f" % mmd.compute_statistic())
mmd=sg.LinearTimeMMD()
mmd.set_p(gen_p)
mmd.set_q(gen_q)
mmd.set_kernel(kernel)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
print("m=%d samples from p and q" % m)
print("Binary test result is: " + ("Rejection" if mmd.perform_test(alpha) else "No rejection"))
print("P-value test result is %.2f" % mmd.compute_p_value(mmd.compute_statistic()))
# mmd instance using streaming features
mmd=sg.LinearTimeMMD()
mmd.set_p(gen_p)
mmd.set_q(gen_q)
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_num_blocks_per_burst(block_size)
sigmas=[2**x for x in np.linspace(-5, 5, 11)]
print("Choosing kernel width from", ["{0:.2f}".format(sigma) for sigma in sigmas])
for i in range(len(sigmas)):
mmd.add_kernel(sg.create_kernel("GaussianKernel", width=sigmas[i]))
# optimal kernel choice is possible for linear time MMD
mmd.set_kernel_selection_strategy(sg.KSM_MAXIMIZE_POWER)
# must be set true for kernel selection
mmd.set_train_test_mode(True)
# select best kernel
mmd.select_kernel()
best_kernel=mmd.get("kernel")
print("Best single kernel has bandwidth %.2f" % np.exp(best_kernel.get("width")))
mmd.set_null_approximation_method(sg.NAM_MMD1_GAUSSIAN);
p_value_best=mmd.compute_p_value(mmd.compute_statistic());
print("Bootstrapping: P-value of MMD test with optimal kernel is %.2f" % p_value_best)
m=5000
mmd.set_num_samples_p(m)
mmd.set_num_samples_q(m)
mmd.set_train_test_mode(False)
num_samples=500
# sample null and alternative distribution, implicitly generate new data for that
mmd.set_null_approximation_method(sg.NAM_PERMUTATION)
mmd.set_num_null_samples(num_samples)
null_samples=mmd.sample_null()
alt_samples=np.zeros(num_samples)
for i in range(num_samples):
alt_samples[i]=mmd.compute_statistic()
plot_alt_vs_null(alt_samples, null_samples, alpha)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some Formal Basics (skip if you just want code examples)
Step2: Now how to compare these two sets of samples? Clearly, a t-test would be a bad idea since it basically compares the mean and variance of $X$ and $Y$. But we set those to be equal. By chance, the estimates of these statistics might differ, but that is unlikely to be significant. Thus, we have to look at higher order statistics of the samples. In fact, kernel two-sample tests look at all (infinitely many) higher order moments.
Step3: Quadratic Time MMD
Step4: Any sub-class of <a href="http
Step5: Now let us visualise distribution of MMD statistic under $H_0
Step6: Null and Alternative Distribution Illustrated
Step7: Different Ways to Approximate the Null Distribution for the Quadratic Time MMD
Step8: The above plot of the Eigenspectrum shows that the Eigenvalues are decaying extremely fast. We choose the number for the approximation such that all Eigenvalues bigger than some threshold are used. In this case, we will not lose a lot of accuracy while gaining a significant speedup. For slower-decaying Eigenspectrums, this approximation might be more expensive.
Step9: The Gamma Moment Matching Approximation and Type I errors
Step10: As we can see, the above example was kind of unfortunate, as the approximation fails badly. We check the type I error to verify that. This works similarly to sampling the alternative distribution
Step11: We see that Gamma basically never rejects, which is inline with the fact that the p-value was massively overestimated above. Note that for the other tests, the p-value is also not at its desired value, but this is due to the low number of samples/repetitions in the above code. Increasing them leads to consistent type I errors.
Step12: We now describe the linear time MMD, as described in [1, Section 6], which is implemented in Shogun. A fast, unbiased estimate for the original MMD expression which still uses all available data can be obtained by dividing data into two parts and then compute
Step13: Sometimes, one might want to use <a href="http
Step14: The Gaussian Approximation to the Null Distribution
Step15: Kernel Selection for the MMD -- Overview
Step16: Now perform two-sample test with that kernel
Step17: For the linear time MMD, the null and alternative distributions look different than for the quadratic time MMD as plotted above. Let's sample them (takes longer, so reduce the number of samples a bit). Note how we can tell the linear time MMD to simulate the null hypothesis, which is necessary since we cannot permute by hand, as the samples are not in memory.
Step18: And visualise again. Note that both the null and alternative distributions are Gaussian, which allows the fast null distribution approximation and the optimal kernel selection
|
4,794
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from scipy.signal import argrelmax
fracture_pressure_data = np.loadtxt("data/fracture_pressure_abnormal.csv", delimiter=",")
fracture_pressure, TVD_frac = fracture_pressure_data.T
pore_pressure_data = np.loadtxt("data/pore_pressure_abnormal.csv", delimiter=",")
pore_pressure, TVD_pore = pore_pressure_data.T
print(argrelmax(pore_pressure)[0])
print(pore_pressure[argrelmax(pore_pressure)[0]])
fig, ax = plt.subplots(1, figsize=(13, 13))
ax.set_title("Casing Setting Depths", fontsize=30, y=1.08)
label_size = 12
ax.plot(fracture_pressure, TVD_frac, color="red", linewidth=3, label="Fracture Pressure")
ax.plot(pore_pressure, TVD_pore, color="blue", linewidth=3, label="Pore Pressure")
ax.set_ylabel("Total Vertical Depth [ft]", fontsize=25)
ax.set_ylim(ax.get_ylim()[::-1])
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
yed = [tick.label.set_fontsize(label_size) for tick in ax.yaxis.get_major_ticks()]
xed = [tick.label.set_fontsize(label_size) for tick in ax.xaxis.get_major_ticks()]
ax.set_xlabel("Equivalent Mud Density [ppg]", fontsize=25)
ax.ticklabel_format(fontsize=25)
ax.grid()
ax.legend(fontsize=25)
class BottomUpCasingDesign(object):
def __init__(self, fracture_pressure, tvd_frac, pore_pressure, tvd_pore):
self.fracture_pressure = fracture_pressure
self.tvd_frac = tvd_frac
self.pore_pressure = pore_pressure
self.tvd_pore = tvd_pore
def bottom_up_casing_seat(self, previous_section_mud_weight):
next_section_tvd = np.interp(previous_section_mud_weight, self.fracture_pressure, self.tvd_frac)
next_section_mud_weight = np.interp(next_section_tvd, self.tvd_pore, self.pore_pressure)
return next_section_mud_weight, next_section_tvd
def find_casing_seats(self, maxiter=100):
current_mud_weight = self.pore_pressure[-1]
current_depth = self.tvd_pore[-1]
casing_seats = []
i = 0
while current_depth > 0.0 and i < maxiter:
            print(self.check_self_intersect(current_mud_weight, self.tvd_pore, self.pore_pressure))
next_section_mud_weight, next_section_tvd = self.bottom_up_casing_seat(current_mud_weight)
casing_seats.append((next_section_tvd, next_section_mud_weight))
current_depth, current_mud_weight = next_section_tvd, next_section_mud_weight
i += 1
break
return casing_seats
def check_self_intersect(self, value, tvd, mudweight):
self_intersect_tvd = np.interp(value, mudweight, tvd)
return self_intersect_tvd
def get_casing_seat_plot_data(self, casing_seats):
casing_seats_tvd = [self.tvd_pore[-1], casing_seats[0][0]]
casing_seats_ppg = [self.pore_pressure[-1], self.pore_pressure[-1]]
for p1, p2 in zip(casing_seats, casing_seats[1::]):
casing_seats_tvd.append(p1[0])
casing_seats_tvd.append(p2[0])
for p1, p2 in zip(casing_seats, casing_seats):
casing_seats_ppg.append(p1[1])
casing_seats_ppg.append(p2[1])
return casing_seats_tvd, casing_seats_ppg[0:-2]
fracture_pressure_safety = 0.97*fracture_pressure
pore_pressure_safety = 1.03*pore_pressure
bottom_up = BottomUpCasingDesign(fracture_pressure_safety, TVD_frac, pore_pressure_safety, TVD_pore)
casing_seats = bottom_up.find_casing_seats()
casing_seats_tvd, casing_seats_ppg = bottom_up.get_casing_seat_plot_data(casing_seats)
print(casing_seats_tvd)
print(casing_seats_ppg)
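# Hedged walk-through of a single bottom-up step (added for illustration;
# mirrors BottomUpCasingDesign.bottom_up_casing_seat): starting from the mud
# weight needed at total depth, np.interp finds how shallow that mud weight
# can be carried before exceeding the fracture pressure, and then reads off
# the pore pressure at that depth. Note that np.interp assumes increasing
# x-coordinates, which holds only approximately for real pressure profiles,
# and that the `break` in find_casing_seats means only this first seat was
# computed above.
mw_at_td = pore_pressure_safety[-1]
seat_tvd = np.interp(mw_at_td, fracture_pressure_safety, TVD_frac)
mw_next = np.interp(seat_tvd, TVD_pore, pore_pressure_safety)
print("Mud weight %.2f ppg fractures at %.0f ft; pore pressure there is %.2f ppg"
      % (mw_at_td, seat_tvd, mw_next))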
fig, ax = plt.subplots(1, figsize=(13, 13))
ax.set_title("Casing Setting Depths", fontsize=30, y=1.08)
label_size = 12
ax.plot(fracture_pressure, TVD_frac, color="red", linewidth=3, label="Fracture Pressure")
ax.plot(pore_pressure, TVD_pore, color="blue", linewidth=3, label="Pore Pressure")
ax.plot(fracture_pressure_safety, TVD_frac, color="red", linewidth=3, label="Fracture Pressure", linestyle="--")
ax.plot(pore_pressure_safety, TVD_pore, color="blue", linewidth=3, label="Pore Pressure", linestyle="--")
ax.plot(casing_seats_ppg, casing_seats_tvd, color="black", linestyle="--", linewidth=3, label="Casing Seats")
ax.set_ylabel("Total Vertical Depth [ft]", fontsize=25)
ax.set_ylim(ax.get_ylim()[::-1])
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
yed = [tick.label.set_fontsize(label_size) for tick in ax.yaxis.get_major_ticks()]
xed = [tick.label.set_fontsize(label_size) for tick in ax.xaxis.get_major_ticks()]
ax.set_xlabel("Equivalent Mud Density [ppg]", fontsize=25)
ax.ticklabel_format(fontsize=25)
ax.grid()
ax.legend(fontsize=20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Turning theory into code
Step2: Step two now involves extending a line up until we hit the fracture pressure. This means we have to interpolate our known pore pressure at the target depth to find the corresponding depth in our fracture pressure data.
|
4,795
|
<ASSISTANT_TASK:>
Python Code:
import os
import torch
import pyro
import pyro.distributions as dist
from torch.distributions import constraints
from pyro import poutine
from pyro.distributions.util import broadcast_shape
from pyro.infer import Trace_ELBO, JitTrace_ELBO, TraceEnum_ELBO, JitTraceEnum_ELBO, SVI
from pyro.infer.mcmc import MCMC, NUTS
from pyro.infer.autoguide import AutoDiagonalNormal
from pyro.optim import Adam
smoke_test = ('CI' in os.environ)
assert pyro.__version__.startswith('1.7.0')
def model(data):
loc = pyro.sample("loc", dist.Normal(0., 10.))
scale = pyro.sample("scale", dist.LogNormal(0., 3.))
with pyro.plate("data", data.size(0)):
pyro.sample("obs", dist.Normal(loc, scale), obs=data)
guide = AutoDiagonalNormal(model)
data = dist.Normal(0.5, 2.).sample((100,))
%%time
pyro.clear_param_store()
elbo = Trace_ELBO()
svi = SVI(model, guide, Adam({'lr': 0.01}), elbo)
for i in range(2 if smoke_test else 1000):
svi.step(data)
%%time
pyro.clear_param_store()
guide(data) # Do any lazy initialization before compiling.
elbo = JitTrace_ELBO()
svi = SVI(model, guide, Adam({'lr': 0.01}), elbo)
for i in range(2 if smoke_test else 1000):
svi.step(data)
%%time
nuts_kernel = NUTS(model)
pyro.set_rng_seed(1)
mcmc_run = MCMC(nuts_kernel, num_samples=100).run(data)
%%time
nuts_kernel = NUTS(model, jit_compile=True, ignore_jit_warnings=True)
pyro.set_rng_seed(1)
mcmc_run = MCMC(nuts_kernel, num_samples=100).run(data)
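# Hedged follow-up (not in the original notebook): in Pyro 1.x, MCMC.run()
# returns None, so `mcmc_run` above is None. Keep a reference to the MCMC
# object itself if you want to inspect the posterior via get_samples().
mcmc = MCMC(nuts_kernel, num_samples=100)
mcmc.run(data)
posterior = mcmc.get_samples()  # dict mapping sample-site names to tensors
print({name: tuple(s.shape) for name, s in posterior.items()})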
def model(sequence, num_sequences, length, state_dim=16):
# This is a Gaussian HMM model.
with pyro.plate("states", state_dim):
trans = pyro.sample("trans", dist.Dirichlet(0.5 * torch.ones(state_dim)))
emit_loc = pyro.sample("emit_loc", dist.Normal(0., 10.))
emit_scale = pyro.sample("emit_scale", dist.LogNormal(0., 3.))
# We're doing manual data subsampling, so we need to scale to actual data size.
with poutine.scale(scale=num_sequences):
# We'll use enumeration inference over the hidden x.
x = 0
for t in pyro.markov(range(length)):
x = pyro.sample("x_{}".format(t), dist.Categorical(trans[x]),
infer={"enumerate": "parallel"})
pyro.sample("y_{}".format(t), dist.Normal(emit_loc[x], emit_scale),
obs=sequence[t])
guide = AutoDiagonalNormal(poutine.block(model, expose=["trans", "emit_scale", "emit_loc"]))
# This is fake data of different lengths.
lengths = [24] * 50 + [48] * 20 + [72] * 5
sequences = [torch.randn(length) for length in lengths]
%%time
pyro.clear_param_store()
elbo = TraceEnum_ELBO(max_plate_nesting=1)
svi = SVI(model, guide, Adam({'lr': 0.01}), elbo)
for i in range(1 if smoke_test else 10):
for sequence in sequences:
svi.step(sequence, # tensor args
num_sequences=len(sequences), length=len(sequence)) # non-tensor args
%%time
pyro.clear_param_store()
# Do any lazy initialization before compiling.
guide(sequences[0], num_sequences=len(sequences), length=len(sequences[0]))
elbo = JitTraceEnum_ELBO(max_plate_nesting=1)
svi = SVI(model, guide, Adam({'lr': 0.01}), elbo)
for i in range(1 if smoke_test else 10):
for sequence in sequences:
svi.step(sequence, # tensor args
num_sequences=len(sequences), length=len(sequence)) # non-tensor args
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: First let's run as usual with an SVI object and Trace_ELBO.
Step3: Next to run with a jit compiled inference, we simply replace
Step4: Notice that we have a more than 2x speedup for this small model.
Step5: We can compile the potential energy computation in NUTS using the jit_compile=True argument to the NUTS kernel. We also silence JIT warnings due to the presence of tensor constants in the model by using ignore_jit_warnings=True.
Step6: We notice a significant increase in sampling throughput when JIT compilation is enabled.
Step7: Now let's run SVI as usual.
Step8: Again we'll simply swap in a Jit* implementation
|
4,796
|
<ASSISTANT_TASK:>
Python Code:
from numpy import concatenate, array
from numpy.random import randn
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
num = 200
d1 = concatenate((randn(1,num),10.*randn(1,num)),0)
d2 = concatenate((randn(1,num),10.*randn(1,num)),0)+array([[10.],[0.]])
d3 = concatenate((randn(1,num),10.*randn(1,num)),0)+array([[0.],[100.]])
d4 = concatenate((randn(1,num),10.*randn(1,num)),0)+array([[10.],[100.]])
rectangle = concatenate((d1,d2,d3,d4),1)
totalPoints = 800
import matplotlib.pyplot as pyplot
%matplotlib inline
figure,axis = pyplot.subplots(1,1)
axis.plot(rectangle[0], rectangle[1], 'o', color='r', markersize=5)
axis.set_xlim(-5,15)
axis.set_ylim(-50,150)
axis.set_title('Toy data : Rectangle')
pyplot.show()
from shogun import *
train_features = features(rectangle)
# number of clusters
k = 2
# distance metric over feature matrix - Euclidean distance
distance = EuclideanDistance(train_features, train_features)
# KMeans object created
kmeans = KMeans(k, distance)
# KMeans training
kmeans.train()
# cluster centers
centers = kmeans.get_cluster_centers()
# Labels for data points
result = kmeans.apply()
def plotResult(title = 'KMeans Plot'):
figure,axis = pyplot.subplots(1,1)
for i in range(totalPoints):
if result[i]==0.0:
axis.plot(rectangle[0,i], rectangle[1,i], 'o', color='g', markersize=3)
else:
axis.plot(rectangle[0,i], rectangle[1,i], 'o', color='y', markersize=3)
axis.plot(centers[0,0], centers[1,0], 'ko', color='g', markersize=10)
axis.plot(centers[0,1], centers[1,1], 'ko', color='y', markersize=10)
axis.set_xlim(-5,15)
axis.set_ylim(-50,150)
axis.set_title(title)
pyplot.show()
plotResult('KMeans Results')
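# Hedged aside (illustration only, not Shogun's implementation): one can see
# what KMeans.train does by running Lloyd iterations in plain NumPy -- assign
# each point to its nearest centre, then move each centre to the mean of its
# assigned points, and repeat. Assumes every cluster keeps at least one point.
import numpy as np
pts = rectangle.T                      # (800, 2) array of points
c = pts[np.random.choice(len(pts), k, replace=False)].copy()
for _ in range(10):                    # a few iterations suffice on this data
    d2 = ((pts[:, None, :] - c[None, :, :]) ** 2).sum(axis=2)
    assign = d2.argmin(axis=1)         # nearest-centre assignment
    c = np.array([pts[assign == j].mean(axis=0) for j in range(k)])
print("Hand-rolled cluster centres:")
print(c)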
from numpy import array
initial_centers = array([[0.,10.],[50.,50.]])
# initial centers passed
kmeans = KMeans(k, distance, initial_centers)
# KMeans training
kmeans.train(train_features)
# cluster centers
centers = kmeans.get_cluster_centers()
# Labels for data points
result = kmeans.apply()
# plot the results
plotResult('Hand initialized KMeans Results 1')
new_initial_centers = array([[5.,5.],[0.,100.]])
# set new initial centers
kmeans.set_initial_centers(new_initial_centers)
# KMeans training
kmeans.train(train_features)
# cluster centers
centers = kmeans.get_cluster_centers()
# Labels for data points
result = kmeans.apply()
# plot the results
plotResult('Hand initialized KMeans Results 2')
# set flag for using KMeans++
kmeans = KMeans(k, distance, True)
# set KMeans++ flag
kmeans.set_use_kmeanspp(True)
# KMeans training
kmeans.train(train_features)
# cluster centers
centers = kmeans.get_cluster_centers()
# Labels for data points
result = kmeans.apply()
# plot the results
plotResult('KMeans with KMeans++ Results')
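# Hedged sketch of the KMeans++ seeding idea (illustration, not Shogun's
# code): pick the first centre uniformly at random, then pick each further
# centre with probability proportional to its squared distance from the
# nearest centre chosen so far, which spreads the initial centres out.
import numpy as np
pts = rectangle.T
seeds = [pts[np.random.randint(len(pts))]]
while len(seeds) < k:
    d2 = np.min([((pts - s) ** 2).sum(axis=1) for s in seeds], axis=0)
    seeds.append(pts[np.random.choice(len(pts), p=d2 / d2.sum())])
print("KMeans++-style seeds:")
print(np.array(seeds))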
#unset KMeans++ flag
kmeans.set_use_kmeanspp(False)
# set training method to mini-batch
kmeans = KMeansMiniBatch(k, distance)
# set both parameters together batch size-2 and no. of iterations-100
kmeans.set_mb_params(2,100)
# OR
# set batch size-2
kmeans.set_batch_size(2)
# set no. of iterations-100
kmeans.set_mb_iter(100)
# KMeans training
kmeans.train(train_features)
# cluster centers
centers = kmeans.get_cluster_centers()
# Labels for data points
result = kmeans.apply()
# plot the results
plotResult('Mini-batch KMeans Results')
f = open(os.path.join(SHOGUN_DATA_DIR, 'uci/iris/iris.data'))
feats = []
# read data from file
for line in f:
words = line.rstrip().split(',')
feats.append([float(i) for i in words[0:4]])
f.close()
# create observation matrix
obsmatrix = array(feats).T
# plot the data
figure,axis = pyplot.subplots(1,1)
# First 50 data points belong to Iris Setosa, plotted in green
axis.plot(obsmatrix[2,0:50], obsmatrix[3,0:50], 'o', color='green', markersize=5)
# Next 50 data belong to Iris Versicolour, plotted in red
axis.plot(obsmatrix[2,50:100], obsmatrix[3,50:100], 'o', color='red', markersize=5)
# Last 50 data belong to Iris Virginica, plotted in blue
axis.plot(obsmatrix[2,100:150], obsmatrix[3,100:150], 'o', color='blue', markersize=5)
axis.set_xlim(-1,8)
axis.set_ylim(-1,3)
axis.set_title('3 varieties of Iris plants')
pyplot.show()
def apply_kmeans_iris(data):
# wrap to Shogun features
train_features = features(data)
# number of cluster centers = 3
k = 3
# distance function features - euclidean
distance = EuclideanDistance(train_features, train_features)
# initialize KMeans object
kmeans = KMeans(k, distance)
# use kmeans++ to initialize centers [play around: change it to False and compare results]
kmeans.set_use_kmeanspp(True)
# training method is Lloyd by default [play around: change it to mini-batch by uncommenting the following lines]
#kmeans.set_train_method(KMM_MINI_BATCH)
#kmeans.set_mbKMeans_params(20,30)
# training kmeans
kmeans.train(train_features)
# labels for data points
result = kmeans.apply()
return result
result = apply_kmeans_iris(obsmatrix)
# plot the clusters over the original points in 2 dimensions
figure,axis = pyplot.subplots(1,1)
for i in range(150):
if result[i]==0.0:
axis.plot(obsmatrix[2,i],obsmatrix[3,i],'ko',color='r', markersize=5)
elif result[i]==1.0:
axis.plot(obsmatrix[2,i],obsmatrix[3,i],'ko',color='g', markersize=5)
else:
axis.plot(obsmatrix[2,i],obsmatrix[3,i],'ko',color='b', markersize=5)
axis.set_xlim(-1,8)
axis.set_ylim(-1,3)
axis.set_title('Iris plants clustered based on attributes')
pyplot.show()
from numpy import ones, zeros
# first 50 are Iris Setosa labelled 0, next 50 are Iris Versicolour labelled 1, and so on
labels = concatenate((zeros(50),ones(50),2.*ones(50)),0)
# bind labels assigned to Shogun multiclass labels
ground_truth = MulticlassLabels(array(labels,dtype='float64'))
from numpy import nonzero
def analyzeResult(result):
# shogun object for clustering accuracy
AccuracyEval = ClusteringAccuracy()
# changes the labels of result (keeping clusters intact) to produce a best match with ground truth
AccuracyEval.best_map(result, ground_truth)
# evaluates clustering accuracy
accuracy = AccuracyEval.evaluate(result, ground_truth)
# find out which sample points differ from actual labels (or ground truth)
compare = result.get_labels()-labels
diff = nonzero(compare)
return (diff,accuracy)
(diff,accuracy_4d) = analyzeResult(result)
print('Accuracy : ' + str(accuracy_4d))
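# Hedged cross-check (illustration only): best_map above relabels clusters to
# best match the ground truth. A simple majority-vote mapping -- count, for
# each cluster, how many of its members carry the cluster's most common true
# label -- should closely match the number above, although in general it is
# only an upper bound since it need not map clusters to labels one-to-one.
import numpy as np
pred = result.get_labels()
correct = 0
for cl in np.unique(pred):
    members = labels[pred == cl].astype(int)
    correct += np.max(np.bincount(members))  # size of the majority label
print('Majority-vote accuracy : ' + str(correct / float(len(labels))))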
# plot the difference between ground truth and predicted clusters
figure,axis = pyplot.subplots(1,1)
axis.plot(obsmatrix[2,:],obsmatrix[3,:],'x',color='black', markersize=5)
axis.plot(obsmatrix[2,diff],obsmatrix[3,diff],'x',color='r', markersize=7)
axis.set_xlim(-1,8)
axis.set_ylim(-1,3)
axis.set_title('Difference')
pyplot.show()
from numpy import dot
def apply_pca_to_data(target_dims):
train_features = features(obsmatrix)
submean = PruneVarSubMean(False)
submean.init(train_features)
submean.apply_to_feature_matrix(train_features)
preprocessor = PCA()
preprocessor.set_target_dim(target_dims)
preprocessor.init(train_features)
pca_transform = preprocessor.get_transformation_matrix()
new_features = dot(pca_transform.T, train_features)
return new_features
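# Hedged cross-check of the PCA step (added for illustration; assumes the
# usual SVD-based PCA on mean-centred data, which may differ from Shogun's
# sign and scaling conventions):
import numpy as np
centred = obsmatrix - obsmatrix.mean(axis=1, keepdims=True)
u_svd, s_svd, _ = np.linalg.svd(centred, full_matrices=False)
explained = 100.0 * s_svd ** 2 / (s_svd ** 2).sum()
print("Variance explained per component (%): " +
      ", ".join("%.1f" % e for e in explained))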
oneD_matrix = apply_pca_to_data(1)
figure,axis = pyplot.subplots(1,1)
# First 50 data points belong to Iris Setosa, plotted in green
axis.plot(oneD_matrix[0,0:50], zeros(50), 'o', color='green', markersize=5)
# Next 50 data belong to Iris Versicolour, plotted in red
axis.plot(oneD_matrix[0,50:100], zeros(50), 'o', color='red', markersize=5)
# Last 50 data belong to Iris Virginica, plotted in blue
axis.plot(oneD_matrix[0,100:150], zeros(50), 'o', color='blue', markersize=5)
axis.set_xlim(-5,5)
axis.set_ylim(-1,1)
axis.set_title('3 varieties of Iris plants')
pyplot.show()
result = apply_kmeans_iris(oneD_matrix)
(diff,accuracy_1d) = analyzeResult(result)
print('Accuracy : ' + str(accuracy_1d))
# plot the difference between ground truth and predicted clusters
figure,axis = pyplot.subplots(1,1)
axis.plot(oneD_matrix[0,:],zeros(150),'x',color='black', markersize=5)
axis.plot(oneD_matrix[0,diff],zeros(len(diff)),'x',color='r', markersize=7)
axis.set_xlim(-5,5)
axis.set_ylim(-1,1)
axis.set_title('Difference')
pyplot.show()
twoD_matrix = apply_pca_to_data(2)
figure,axis = pyplot.subplots(1,1)
# First 50 data belong to Iris Sentosa, plotted in green
axis.plot(twoD_matrix[0,0:50], twoD_matrix[1,0:50], 'o', color='green', markersize=5)
# Next 50 data belong to Iris Versicolour, plotted in red
axis.plot(twoD_matrix[0,50:100], twoD_matrix[1,50:100], 'o', color='red', markersize=5)
# Last 50 data belong to Iris Virginica, plotted in blue
axis.plot(twoD_matrix[0,100:150], twoD_matrix[1,100:150], 'o', color='blue', markersize=5)
axis.set_title('3 varieties of Iris plants')
pyplot.show()
result = apply_kmeans_iris(twoD_matrix)
(diff,accuracy_2d) = analyzeResult(result)
print('Accuracy : ' + str(accuracy_2d))
# plot the difference between ground truth and predicted clusters
figure,axis = pyplot.subplots(1,1)
axis.plot(twoD_matrix[0,:],twoD_matrix[1,:],'x',color='black', markersize=5)
axis.plot(twoD_matrix[0,diff],twoD_matrix[1,diff],'x',color='r', markersize=7)
axis.set_title('Difference')
pyplot.show()
threeD_matrix = apply_pca_to_data(3)
result = apply_kmeans_iris(threeD_matrix)
(diff,accuracy_3d) = analyzeResult(result)
print('Accuracy : ' + str(accuracy_3d))
# plot the difference between ground truth and predicted clusters
figure,axis = pyplot.subplots(1,1)
axis.plot(obsmatrix[2,:],obsmatrix[3,:],'x',color='black', markersize=5)
axis.plot(obsmatrix[2,diff],obsmatrix[3,diff],'x',color='r', markersize=7)
axis.set_title('Difference')
axis.set_xlim(-1,8)
axis.set_ylim(-1,3)
pyplot.show()
from scipy.interpolate import interp1d
from numpy import linspace
x = array([1, 2, 3, 4])
y = array([accuracy_1d, accuracy_2d, accuracy_3d, accuracy_4d])
f = interp1d(x, y)
xnew = linspace(1,4,10)
pyplot.plot(x,y,'o',xnew,f(xnew),'-')
pyplot.xlim([0,5])
pyplot.xlabel('no. of dims')
pyplot.ylabel('Clustering Accuracy')
pyplot.title('PCA Results')
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The toy data created above consists of 4 Gaussian blobs of 200 points each, centered around the vertices of a rectangle. Let's plot it for convenience.
Step2: With data at our disposal, it is time to apply KMeans to it using the KMeans class in Shogun. First we construct Shogun features from our data
Step3: Next we specify the number of clusters we want and create a distance object specifying the distance metric to be used over our data for our KMeans training
Step4: Next, we create a KMeans object with our desired inputs/parameters and train
Step5: Now that training has been done, let's get the cluster centers and label for each data point
Step6: Finally let us plot the centers and the data points (in different colours for different clusters)
Step7: <b>Note
Step8: Now, let's first get results by repeating the rest of the steps
Step9: The other way to initialize centers by hand is as follows
Step10: Let's complete the rest of the code to get results.
Step11: Note the difference that initial cluster centers can have on the final result.
Step12: The other way to initialize using KMeans++ is as follows
Step13: Completing rest of the steps to get result
Step14: To switch back to random initialization, you may use
Step15: Training Methods
Step16: In mini-batch KMeans it is compulsory to set batch-size and number of iterations. These parameters can be set together or one after the other.
Step17: Completing the code to get results
Step18: Applying KMeans on Real Data
Step19: In the above plot we see that the data points labelled Iris Setosa form a nice separate cluster of their own. But in the case of the other 2 varieties, while the data points of the same label do form clusters of their own, there is some mixing between the clusters at the boundary. Now let us apply the KMeans algorithm and see how well we can extract these clusters.
Step20: Now let us create a 2-D plot of the clusters formed making use of the two most important features (petal length and petal width) and compare it with the earlier plot depicting the actual labels of data points.
Step21: From the above plot, it can be inferred that the accuracy of KMeans algorithm is very high for Iris dataset. Don't believe me? Alright, then let us make use of one of Shogun's clustering evaluation techniques to formally validate the claim. But before that, we have to label each sample in the dataset with a label corresponding to the class to which it belongs.
Step22: Now we can compute clustering accuracy making use of the ClusteringAccuracy class in Shogun
Step23: In the above plot, wrongly clustered data points are marked in red. We see that the Iris Setosa plants are perfectly clustered without error. The Iris Versicolour and Iris Virginica plants are also clustered with high accuracy, but there are some plant samples of either class that have been clustered with the wrong class. This happens near the boundary of the 2 classes in the plot and was well expected. Having mastered KMeans, it's time to move on to the next interesting topic.
Step24: Next, let us get an idea of the data in 1-D by plotting it.
Step25: Let us now apply KMeans to the 1-D data to get clusters.
Step26: Now that we have the results, the inevitable step is to check how good these results are.
Step27: 2-Dimensional Representation
Step28: STEP 2
Step29: STEP 3
Step30: 3-Dimensional Representation
Step31: STEP 2
Step32: STEP 3
Step33: Finally, let us plot clustering accuracy vs. number of dimensions to consolidate our results.
|
4,797
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-veg', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Fluorinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Fluorinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Representation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
4,798
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
print("TensorFlow version: ", tf.version.VERSION)
# This will be an int32 tensor by default; see "dtypes" below.
rank_0_tensor = tf.constant(4)
print(rank_0_tensor)
# Let's make this a float tensor.
rank_1_tensor = tf.constant([2.0, 3.0, 4.0])
print(rank_1_tensor)
# If we want to be specific, we can set the dtype (see below) at creation time
rank_2_tensor = tf.constant(
[[1, 2], [3, 4], [5, 6]],
dtype=None # TODO 1a
# TODO: Your code goes here.
)
print(rank_2_tensor)
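# One hypothetical way to satisfy TODO 1a (an assumption, not the official lab solution) would be
# to pass an explicit dtype, e.g.: tf.constant([[1, 2], [3, 4], [5, 6]], dtype=tf.float16)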
# There can be an arbitrary number of
# axes (sometimes called "dimensions")
rank_3_tensor = tf.constant(
[
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
[[10, 11, 12, 13, 14], [15, 16, 17, 18, 19]],
[[20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
]
)
print(rank_3_tensor)
# Convert a tensor to a NumPy array using `np.array` method
# TODO 1b
# TODO -- Your code here.
# Convert a tensor to a NumPy array using `tensor.numpy` method
# TODO 1c
# TODO -- Your code here.
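# Hypothetical completions of TODO 1b / 1c (assumptions, not the official lab solution):
print(np.array(rank_2_tensor))   # np.array() copies the tensor into a NumPy array
print(rank_2_tensor.numpy())     # .numpy() returns the same values via the tensor method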
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[1, 1], [1, 1]]) # Could have also said `tf.ones([2,2])`
print(tf.add(a, b), "\n")
print(tf.multiply(a, b), "\n")
print(tf.matmul(a, b), "\n")
print(a + b, "\n") # element-wise addition
print(a * b, "\n") # element-wise multiplication
print(a @ b, "\n") # matrix multiplication
c = tf.constant([[4.0, 5.0], [10.0, 1.0]])
# Find the largest value
print(tf.reduce_max(c))
# TODO 1d
# Find the index of the largest value
# TODO -- Your code here.
# Compute the softmax
# TODO -- Your code here.
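# Hypothetical completions of TODO 1d (assumptions, not the official lab solution):
print(tf.math.argmax(c))   # index of the largest value in each column (axis 0)
print(tf.nn.softmax(c))    # softmax over the last axis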
rank_4_tensor = tf.zeros([3, 2, 4, 5])
print("Type of every element:", rank_4_tensor.dtype)
print("Number of dimensions:", rank_4_tensor.ndim)
print("Shape of tensor:", rank_4_tensor.shape)
print("Elements along axis 0 of tensor:", rank_4_tensor.shape[0])
print("Elements along the last axis of tensor:", rank_4_tensor.shape[-1])
print("Total number of elements (3*2*4*5): ", tf.size(rank_4_tensor).numpy())
rank_1_tensor = tf.constant([0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
print(rank_1_tensor.numpy())
print("First:", rank_1_tensor[0].numpy())
print("Second:", rank_1_tensor[1].numpy())
print("Last:", rank_1_tensor[-1].numpy())
print("Everything:", rank_1_tensor[:].numpy())
print("Before 4:", rank_1_tensor[:4].numpy())
print("From 4 to the end:", rank_1_tensor[4:].numpy())
print("From 2, before 7:", rank_1_tensor[2:7].numpy())
print("Every other item:", rank_1_tensor[::2].numpy())
print("Reversed:", rank_1_tensor[::-1].numpy())
print(rank_2_tensor.numpy())
# Pull out a single value from a 2-rank tensor
print(rank_2_tensor[1, 1].numpy())
# Get row and column tensors
print("Second row:", rank_2_tensor[1, :].numpy())
print("Second column:", rank_2_tensor[:, 1].numpy())
print("Last row:", rank_2_tensor[-1, :].numpy())
print("First item in last column:", rank_2_tensor[0, -1].numpy())
print("Skip the first row:")
print(rank_2_tensor[1:, :].numpy(), "\n")
print(rank_3_tensor[:, :, 4])
# Shape returns a `TensorShape` object that shows the size on each dimension
var_x = tf.Variable(tf.constant([[1], [2], [3]]))
print(var_x.shape)
# You can convert this object into a Python list, too
print(var_x.shape.as_list())
# We can reshape a tensor to a new shape.
# Note that we're passing in a list
# TODO 2a
# TODO -- Your code here.
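# Hypothetical completion of TODO 2a so the prints below run (assumption, not the official solution):
reshaped = tf.reshape(var_x, [1, 3])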
print(var_x.shape)
print(reshaped.shape)
print(rank_3_tensor)
# A `-1` passed in the `shape` argument says "Whatever fits".
print(tf.reshape(rank_3_tensor, [-1]))
print(tf.reshape(rank_3_tensor, [3 * 2, 5]), "\n")
print(tf.reshape(rank_3_tensor, [3, -1]))
# Bad examples: don't do this
# You can't reorder axes with reshape.
print(tf.reshape(rank_3_tensor, [2, 3, 5]), "\n")
# This is a mess
print(tf.reshape(rank_3_tensor, [5, 6]), "\n")
# This doesn't work at all
try:
tf.reshape(rank_3_tensor, [7, -1])
except Exception as e:
print(e)
# Use the `Tensor.dtype` property
# You can cast from type to type
# TODO 2b
# Hypothetical completion of TODO 2b (an assumption for illustration, not the official lab solution):
the_f64_tensor = tf.constant([2.2, 3.3, 4.4], dtype=tf.float64)
the_f16_tensor = tf.cast(the_f64_tensor, dtype=tf.float16)
# Now, let's cast to an uint8 and lose the decimal precision
the_u8_tensor = tf.cast(the_f16_tensor, dtype=tf.uint8)
print(the_u8_tensor)
x = tf.constant([1, 2, 3])
y = tf.constant(2)
z = tf.constant([2, 2, 2])
# All of these are the same computation
print(tf.multiply(x, 2))
print(x * y)
print(x * z)
# These are the same computations
x = tf.reshape(x, [3, 1])
y = tf.range(1, 5)
print(x, "\n")
print(y, "\n")
print(tf.multiply(x, y))
x_stretch = tf.constant([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]])
y_stretch = tf.constant([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
print(x_stretch * y_stretch) # Again, operator overloading
print(tf.broadcast_to(tf.constant([1, 2, 3]), [3, 3]))
ragged_list = [[0, 1, 2, 3], [4, 5], [6, 7, 8], [9]]
try:
tensor = tf.constant(ragged_list)
except Exception as e:
print(e)
# TODO 2c
# Hypothetical completion of TODO 2c (assumption): build the ragged tensor with tf.ragged.constant
ragged_tensor = tf.ragged.constant(ragged_list)
print(ragged_tensor)
print(ragged_tensor.shape)
# Tensors can be strings, too. Here is a scalar string.
scalar_string_tensor = tf.constant("Gray wolf")
print(scalar_string_tensor)
# If we have two string tensors of different lengths, this is OK.
tensor_of_strings = tf.constant(["Gray wolf", "Quick brown fox", "Lazy dog"])
# Note that the shape is (3,); the string length is not included as a dimension.
print(tensor_of_strings)
tf.constant("🥳👍")
# We can use split to split a string into a set of tensors
print(tf.strings.split(scalar_string_tensor, sep=" "))
# ...but it turns into a `RaggedTensor` if we split up a tensor of strings,
# as each string might be split into a different number of parts.
print(tf.strings.split(tensor_of_strings))
text = tf.constant("1 10 100")
print(tf.strings.to_number(tf.strings.split(text, " ")))
byte_strings = tf.strings.bytes_split(tf.constant("Duck"))
byte_ints = tf.io.decode_raw(tf.constant("Duck"), tf.uint8)
print("Byte strings:", byte_strings)
print("Bytes:", byte_ints)
# Or split it up as unicode and then decode it
unicode_bytes = tf.constant("アヒル 🦆")
unicode_char_bytes = tf.strings.unicode_split(unicode_bytes, "UTF-8")
unicode_values = tf.strings.unicode_decode(unicode_bytes, "UTF-8")
print("\nUnicode bytes:", unicode_bytes)
print("\nUnicode chars:", unicode_char_bytes)
print("\nUnicode values:", unicode_values)
# Sparse tensors store values by index in a memory-efficient manner
# TODO 2d
# Hypothetical completion of TODO 2d (assumption, not the official lab solution):
sparse_tensor = tf.sparse.SparseTensor(
    indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]
)
print(sparse_tensor, "\n")
# We can convert sparse tensors to dense
print(tf.sparse.to_dense(sparse_tensor))
import tensorflow as tf
# Uncomment to see where your variables get placed (see below)
# tf.debugging.set_log_device_placement(True)
# TODO 3a
# Hypothetical completions of TODO 3a (assumptions, not the official lab solution):
my_tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
my_variable = tf.Variable(my_tensor)
# Variables can be all kinds of types, just like tensors
bool_variable = tf.Variable([False, False, False, True])
complex_variable = tf.Variable([5 + 4j, 6 + 1j])
print("Shape: ", my_variable.shape)
print("DType: ", my_variable.dtype)
print("As NumPy: ", my_variable.numpy)
print("A variable:", my_variable)
print("\nViewed as a tensor:", tf.convert_to_tensor(my_variable))
print("\nIndex of highest value:", tf.argmax(my_variable))
# This creates a new tensor; it does not reshape the variable.
print("\nCopying and reshaping: ", tf.reshape(my_variable, ([1, 4])))
a = tf.Variable([2.0, 3.0])
# This will keep the same dtype, float32
a.assign([1, 2])
# Not allowed as it resizes the variable:
try:
a.assign([1.0, 2.0, 3.0])
except Exception as e:
print(e)
a = tf.Variable([2.0, 3.0])
# Create b based on the value of a
b = tf.Variable(a)
a.assign([5, 6])
# a and b are different
print(a.numpy())
print(b.numpy())
# There are other versions of assign
print(a.assign_add([2, 3]).numpy()) # [7. 9.]
print(a.assign_sub([7, 9]).numpy()) # [0. 0.]
# Create a and b; they have the same value but are backed by different tensors.
a = tf.Variable(my_tensor, name="Mark")
# A new variable with the same name, but different value
# Note that the scalar add is broadcast
b = tf.Variable(my_tensor + 1, name="Mark")
# These are elementwise-unequal, despite having the same name
print(a == b)
step_counter = tf.Variable(1, trainable=False)
with tf.device("CPU:0"):
# Create some tensors
a = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
c = tf.matmul(a, b)
print(c)
with tf.device("CPU:0"):
a = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.Variable([[1.0, 2.0, 3.0]])
with tf.device("GPU:0"):
# Element-wise multiply
k = a * b
print(k)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lab Task 1
Step2: A "vector" or "rank-1" tensor is like a list of values. A vector has one axis
Step3: A "matrix" or "rank-2" tensor has two axes
Step4: <table>
Step5: There are many ways you might visualize a tensor with more than 2-axes.
Step6: Tensors often contain floats and ints, but have many other types, including
Step7: Tensors are used in all kinds of operations (ops).
Step8: About shapes
Step9: <table>
Step10: While axes are often referred to by their indices, you should always keep track of the meaning of each. Often axes are ordered from global to local
Step11: Indexing with a scalar removes the dimension
Step12: Indexing with a
Step13: Multi-axis indexing
Step14: Passing an integer for each index the result is a scalar.
Step15: You can index using any combination integers and slices
Step16: Here is an example with a 3-axis tensor
Step17: <table>
Step18: You can reshape a tensor into a new shape. Reshaping is fast and cheap as the underlying data does not need to be duplicated.
Step19: The data maintains its layout in memory and a new tensor is created, with the requested shape, pointing to the same data. TensorFlow uses C-style "row-major" memory ordering, where incrementing the right-most index corresponds to a single step in memory.
Step20: If you flatten a tensor you can see what order it is laid out in memory.
Step21: Typically the only reasonable uses of tf.reshape are to combine or split adjacent axes (or add/remove 1s).
Step22: <table>
Step23: <table>
Step24: Broadcasting
Step25: Likewise, 1-sized dimensions can be stretched out to match the other arguments. Both arguments can be stretched in the same computation.
Step26: <table>
Step27: Most of the time, broadcasting is both time and space efficient, as the broadcast operation never materializes the expanded tensors in memory.
Step28: Unlike a mathematical op, for example, broadcast_to does nothing special to save memory. Here, you are materializing the tensor.
Step29: Instead create a tf.RaggedTensor using tf.ragged.constant
Step30: The shape of a tf.RaggedTensor contains unknown dimensions
Step31: String tensors
Step32: And a vector of strings
Step33: In the above printout the b prefix indicates that tf.string dtype is not a unicode string, but a byte-string. See the Unicode Tutorial for more about working with unicode text in TensorFlow.
Step34: Some basic functions with strings can be found in tf.strings, including tf.strings.split.
Step35: <table>
Step36: Although you can't use tf.cast to turn a string tensor into numbers, you can convert it into bytes, and then into numbers.
Step37: The tf.string dtype is used for all raw bytes data in TensorFlow. The tf.io module contains functions for converting data to and from bytes, including decoding images and parsing csv.
Step38: Lab Task 3
Step39: Create a variable
Step40: A variable looks and acts like a tensor, and, in fact, is a data structure backed by a tf.Tensor. Like tensors, they have a dtype and a shape, and can be exported to NumPy.
Step41: Most tensor operations work on variables as expected, although variables cannot be reshaped.
Step42: As noted above, variables are backed by tensors. You can reassign the tensor using tf.Variable.assign. Calling assign does not (usually) allocate a new tensor; instead, the existing tensor's memory is reused.
Step43: If you use a variable like a tensor in operations, you will usually operate on the backing tensor.
Step44: Lifecycles, naming, and watching
Step45: Variable names are preserved when saving and loading models. By default, variables in models will acquire unique variable names automatically, so you don't need to assign them yourself unless you want to.
Step46: Placing variables and tensors
Step47: It's possible to set the location of a variable or tensor on one device and do the computation on another device. This will introduce delay, as data needs to be copied between the devices.
|
4,799
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
Image(url='http://xray.readthedocs.org/en/latest/_images/dataset-diagram.png', embed=True, width=950, height=300)
import numpy as np
import pandas as pd
import xray
temp = 15 + 8 * np.random.randn(2, 2, 3)
VW = 15 + 10 * abs(np.random.randn(2, 2, 3))
precip = 10 * np.random.rand(2, 3)
depths = [5, 20]
lons = [-99.83, -99.79]
lats = [42.63, 42.59]
elevations = [1600, 1650]
ds = xray.Dataset({'temperature': (['site', 'depth', 'time'], temp, {'units':'C'}),
'soil_moisture': (['site', 'depth', 'time'], VW, {'units':'percent'}),
'precipitation': (['site', 'time'], precip, {'units':'mm'})},
coords={'lon': (['site'], lons, {'units':'degrees east'}),
'lat': (['site'], lats, {'units':'degrees north'}),
'elevation': (['site'], elevations, {'units':'m'}),
'site': ['Acacia', 'Riverine'],
'depth': (['depth'], depths, {'units': 'cm'}),
'time': pd.date_range('2015-05-19', periods=3)})
ds
ds.sel(site='Acacia')
print ds.soil_moisture.sel(site='Acacia', time='2015-05-19', depth=5).values
print ds.precipitation.sel(site='Acacia', time='2015-05-19').values
ds.to_dataframe()
ds.to_netcdf('test.nc')
sites = ['MainTower'] # can be replaced if there are more specific sites
lons = [36.8701] # degrees east
lats = [0.4856] # degrees north
elevations = [1610] # m above see level
coords={'site': (['site'], sites),
'lon': (['site'], lons, dict(units='degrees east')),
'lat': (['site'], lats, dict(units='degrees north')),
'elevation': (['site'], elevations, dict(units='m')),
'time': pd.date_range('2015-05-19', periods=3)}
precip = 10 * np.random.rand(1, 3)
ds = xray.Dataset({'precipitation': (['site', 'time'], precip, {'units':'mm'})},
coords=coords)
ds
df = ds.to_dataframe()
df
df.index
from __init__ import *
from TOA5_to_netcdf import *
lons = [36.8701] # degrees east
lats = [0.4856] # degrees north
elevations = [1610] # m above see level
coords={'lon': (['site'], lons, dict(units='degrees east')),
'lat': (['site'], lats, dict(units='degrees north')),
'elevation': (['site'], elevations, dict(units='m'))}
path = os.getcwd().replace('\\','/')+'/current_data/'
input_file = path + 'CR3000_SN4709_flux.dat'
input_dict = {'has_header': True,
'header_file': input_file,
'datafile': 'soil',
'path': path,
'filename': 'CR3000_SN4709_flux.dat'}
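# Note: createDF, get_attrs, createDS, attrs, site and coords_vals are assumed to come from the
# wildcard imports above (__init__ / TOA5_to_netcdf); they are not defined in this snippet.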
df = createDF(input_file, input_dict, attrs)[0]
attrs, local_attrs = get_attrs(input_dict['header_file'], attrs)
ds = createDS(df, input_dict, attrs, local_attrs, site, coords_vals)
ds.to_netcdf(path='test2.nc', format='NETCDF3_64BIT')
xray.open_dataset('test2.nc')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading an example file into a dataset
Step2: This is an example of what our soil moisture data from the radio tower install will look like. Each site has a lat, lon, and elevation and at the site we will record rainfall as well as soil temp and soil moisture at two depths. So there are up to 3 dimensions along which the data are recorded
Step3: Inspecting and selecting from dataset
Step4: Now if we are only interested in soil moisture at the upper depth at a specific time, we can pull out just that one data point
Step5: For precip there are no depth values, so a specific data point can be pulled just by selecting for time and site
Step6: Test what this dataset looks like in pandas and netCDF
Step7: Going back and forth between datasets and dataframes
Step8: Loading dataframes and transferring to datasets
|