Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
10,300
|
<ASSISTANT_TASK:>
Python Code:
import NotebookImport
from Imports import *
matched_meth = pd.read_hdf(METH_STORE, 'matched_tn')
matched_meth = matched_meth.groupby(axis=1, level=[0,1]).first()
matched_meth.T.head(10).T.head()
matched_meth.shape[1] / 2
matched_rna = matched_tn
matched_mir = pd.read_hdf(MIRNA_STORE, 'matched_tn')
dx_rna = binomial_test_screen(matched_rna, fc=1.)
dx_rna = dx_rna[dx_rna.num_dx > 300]
dx_rna.sort('p').head(10)
dx_rna.ix['FOXM1']
#Do not import
fig, ax = subplots(figsize=(7.75,2.5))
paired_bp_tn_split(matched_rna.ix['ADH1B'], codes, data_type='mRNA',
ax=ax)
fig.tight_layout()
dx_rna.ix[['ADH1A','ADH1B','ADH1C']]
dx_rna.p.rank().ix[['ADH1A','ADH1B','ADH1C']]
dx_mir = binomial_test_screen(matched_mir, fc=1.)
dx_mir = dx_mir[dx_mir.num_dx > 300]
dx_mir.sort('p').head()
#Do not import
fig, ax = subplots(figsize=(6.5,2.5))
paired_bp_tn_split(matched_mir.ix['hsa-mir-21'], codes, data_type='',
ax=ax)
fig.tight_layout()
#Do not import
paired_bp_tn_split(matched_mir.ix['hsa-mir-139'], codes, data_type='')
dx_meth = binomial_test_screen(matched_meth, fc=1.)
dx_meth = dx_meth[dx_meth.num_dx > 300]
dx_meth.sort('p').head()
#Do not import
paired_bp_tn_split(matched_meth.ix['cg10216717'], codes, data_type='Beta')
def fig_1e(ax):
    """Draw the fraction-upregulated distributions of all three platforms on *ax*.

    Plots methylation, mRNA and miRNA (in that order, to match the legend),
    strips the y-ticks, labels the axes and prettifies the axis.
    Returns the axis for chaining.
    """
    # (series, extra kwargs) in legend order: methylation, mRNA, miRNA.
    curves = [
        (dx_meth.frac, {}),
        (dx_rna.frac, {'bins': 200}),
        (dx_mir.frac, {'bins': 100}),
    ]
    for frac, extra in curves:
        draw_dist(frac, ax=ax, lw=2.5, **extra)
    ax.set_yticks([])
    ax.set_xticks([0, .5, 1])
    ax.set_ylabel('Density')
    ax.set_xlabel('Fraction')
    ax.legend(('Methylation', 'mRNA', 'miRNA'), frameon=False)
    prettify_ax(ax)
    return ax
#Do not import
fig, ax = subplots(1,1, figsize=(5,3))
fig_1e(ax);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: matched_meth is our matched methylation data.
Step2: Read in matched Gene and miRNA expression data.
Step3: Run a simple screen for DX genes
Step4: Not only is ADH1B the most downregulated gene, but many of the other genes in the ADH family have large effects as well. I go into this with a bit more depth in the alcohol_metabolism Notebook.
Step5: MicroRNA Screen
Step6: Methylation Screen
Step7: Plot distributions of the fraction-upregulated statistic
|
10,301
|
<ASSISTANT_TASK:>
Python Code:
import time
time.time()
## gets the time, still not very human readable
time.localtime()
## formats the time nicely
time.asctime(time.localtime())
## series of random numbers doesn't repeat
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Describe the results.
Step2: But we digress. Back to random numbers...
|
10,302
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
%matplotlib inline
import numpy as np
import reducer.gui
import reducer.astro_gui as astro_gui
from reducer.image_browser import ImageBrowser
import msumastro
from reducer import __version__
print __version__
# To use the sample data set:
data_dir = reducer.notebook_dir.get_data_path()
# Or, uncomment line below and modify as needed
# data_dir = 'path/to/your/data'
destination_dir = '.'
images = msumastro.ImageFileCollection(location=data_dir, keywords='*')
tt = msumastro.TableTree(images.summary_info, ['imagetyp', 'exposure'], 'file')
fits_browser = ImageBrowser(tt, demo=False, directory=data_dir)
fits_browser.display()
fits_browser.padding = '10px'
tt2 = msumastro.TableTree(images.summary_info, ['filter', 'imagetyp', 'exposure'], 'file')
im_a_tree_too = ImageBrowser(tt2, demo=False, directory=data_dir)
im_a_tree_too.display()
bias_reduction = astro_gui.Reduction(description='Reduce bias frames',
toggle_type='button',
allow_bias=False,
allow_dark=False,
allow_flat=False,
input_image_collection=images,
apply_to={'imagetyp': 'bias'},
destination=destination_dir)
bias_reduction.display()
reduced_collection = msumastro.ImageFileCollection(location=destination_dir, keywords='*')
bias_settings = astro_gui.Combiner(description="Master Bias Settings",
toggle_type='button',
file_name_base='master_bias',
image_source=reduced_collection,
apply_to={'imagetyp': 'bias'},
destination=destination_dir)
bias_settings.display()
reduced_collection = msumastro.ImageFileCollection(location=destination_dir, keywords='*')
dark_reduction = astro_gui.Reduction(description='Reduce dark frames',
toggle_type='button',
allow_bias=True,
master_source=reduced_collection,
allow_dark=False,
allow_flat=False,
input_image_collection=images,
destination=destination_dir,
apply_to={'imagetyp': 'dark'})
dark_reduction.display()
reduced_collection = msumastro.ImageFileCollection(location=destination_dir, keywords='*')
dark = astro_gui.Combiner(description="Make Master Dark",
toggle_type='button',
file_name_base='master_dark',
group_by='exposure',
image_source=reduced_collection,
apply_to={'imagetyp': 'dark'},
destination=destination_dir)
dark.display()
reduced_collection = msumastro.ImageFileCollection(location=destination_dir, keywords='*')
flat_reduction = astro_gui.Reduction(description='Reduce flat frames',
toggle_type='button',
allow_bias=True,
master_source=reduced_collection,
allow_dark=True,
allow_flat=False,
input_image_collection=images,
destination=destination_dir,
apply_to={'imagetyp': 'flat'})
flat_reduction.display()
reduced_collection = msumastro.ImageFileCollection(location=destination_dir, keywords='*')
flat = astro_gui.Combiner(description="Make Master Flat",
toggle_type='button',
file_name_base='master_flat',
group_by='exposure, filter',
image_source=reduced_collection,
apply_to={'imagetyp': 'flat'},
destination=destination_dir)
flat.display()
reduced_collection = msumastro.ImageFileCollection(location=destination_dir, keywords='*')
light_reduction = astro_gui.Reduction(description='Reduce light frames',
toggle_type='button',
allow_bias=True,
master_source=reduced_collection,
allow_dark=True,
allow_flat=True,
input_image_collection=images,
destination=destination_dir,
apply_to={'imagetyp': 'light'})
light_reduction.display()
reduced_collection = msumastro.ImageFileCollection(location=destination_dir, keywords='*')
tt3 = msumastro.TableTree(reduced_collection.summary_info, ['imagetyp', 'exposure'], 'file')
fits_browser = ImageBrowser(tt3, demo=False, directory=reduced_collection.location)
fits_browser.display()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enter name of directory that contains your data in the cell below, or...
Step2: Type any comments about this dataset here
Step3: Image Summary
Step4: You can reconfigure the image browser if you want (or not)
Step5: Make a master bias image
Step6: Combine bias images to make master bias
Step7: Make a master dark
Step8: Combine reduced darks to make master(s)
Step9: Make master flats
Step10: Build masters by combining
Step11: Reduce the science images
Step12: Wonder what the reduced images look like? Make another image browser...
|
10,303
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import shapely.geometry
import shapely.ops
import cartopy
import cartopy.io.shapereader as shpreader
point = shapely.geometry.Point(0.2, 1.0)
# Notice, the ipython '__repr__' (representation) displays the point as the output
point
# See some of the point attributes
print(point.geom_type)
print(point.area)
print(point.length)
print(point.bounds)
print(point.coords) # explore this object...
print(point.coords[:])
# A collection of random points...
point1 = shapely.geometry.Point(0.2, 1.0)
point2 = shapely.geometry.Point(0.1, 0.2)
point3 = shapely.geometry.Point(0.5, 0.0)
point4 = shapely.geometry.Point(0.8, 0.5)
# We can see the distance of the points from one another
point2.distance(point1)
# An ordered sequence of points is a `LineString'
line = shapely.geometry.LineString((point1, point2, point3, point4))
line
# To make a closed shape, use a `LinearRing'
closed_line = shapely.geometry.LinearRing(line)
closed_line
# Some of the attributes of the LinearRing..
print(closed_line.area) # no area since it is not 'filled'
print(closed_line.length) # the perimeter of the shape
print(closed_line.bounds) # an xy-plane bounding box
# Let's make a circle, with some noise
N = 50
x = np.cos(np.linspace(0, 2.0*np.pi, N+1))[:-1] + 0.05*np.random.randn(50)
y = np.sin(np.linspace(0, 2.0*np.pi, N+1))[:-1] + 0.05*np.random.randn(50)
plt.plot(x, y, '-')
xy = zip(x, y)
poly = shapely.geometry.Polygon(xy)
poly
# Some of the Polygon attributes
print(poly.area) # does this make sense?
print(poly.centroid)
print(poly.contains(point1), poly.contains(point2))
poly.boundary
dialated = line.buffer(0.3)
eroded = dialated.buffer(-0.2)
plt.plot(*line.xy)
plt.fill(*dialated.boundary.xy, 'g', alpha=0.2)
plt.fill(*eroded.boundary.xy, 'b', alpha=0.2)
poly.buffer(-.8)
# First, let's make a collection of fat points.
xy = np.random.rand(20, 2)
points = shapely.geometry.MultiPoint(xy).buffer(0.1)
points
points.boundary
# make a new line
line2 = shapely.geometry.LineString([(0, 0), (1, 1)])
# loop over the polygons in the regions and plot, colored
# differently if they intersect line1
for poly in points.boundary:
if poly.intersects(line2):
color = 'r'
else:
color = 'k'
coords = poly.coords[:]
x, y = zip(*coords)
plt.fill(x, y, color=color, alpha=0.3)
# Plot the line also
x, y = zip(*line2.coords[:])
plt.plot(x, y, '--k')
plt.gca().set_aspect(1.0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0D shapes
Step2: 1D shapes
Step3: 2D shapes, Polygons and Buffers
Step4: We can also create 2D objects by adding buffers to existing 0D and 1D objects
Step5: Operations
|
10,304
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow_datasets as tfds
datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets['train'], datasets['test']
BUFFER_SIZE = 10 # Use a much larger value for real code
BATCH_SIZE = 64
NUM_EPOCHS = 5
def scale(image, label):
    """Cast *image* to float32 and rescale pixel values from [0, 255] to [0, 1].

    The label is passed through unchanged, so the function can be used
    directly with ``tf.data.Dataset.map`` on (image, label) pairs.
    """
    normalized = tf.cast(image, tf.float32) / 255
    return normalized, label
train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
test_data = mnist_test.map(scale).batch(BATCH_SIZE)
STEPS_PER_EPOCH = 5
train_data = train_data.take(STEPS_PER_EPOCH)
test_data = test_data.take(STEPS_PER_EPOCH)
image_batch, label_batch = next(iter(train_data))
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
# Model is the full model w/o custom layers
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_data, epochs=NUM_EPOCHS)
loss, acc = model.evaluate(test_data)
print("Loss {}, Accuracy {}".format(loss, acc))
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
optimizer = tf.keras.optimizers.Adam(0.001)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
@tf.function
def train_step(inputs, labels):
    """Run one optimization step on the module-level ``model``.

    Computes the prediction loss via the module-level ``loss_fn`` plus the
    model's accumulated regularization losses (e.g. the l2 kernel penalty on
    the Conv2D layer), then applies one update with the module-level
    ``optimizer``. ``@tf.function`` compiles the step into a graph.
    """
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Sum of the per-layer regularization losses collected in model.losses.
        regularization_loss=tf.math.add_n(model.losses)
        pred_loss=loss_fn(labels, predictions)
        total_loss=pred_loss + regularization_loss
    # Gradients are taken outside the tape context; the tape is consumed here.
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for epoch in range(NUM_EPOCHS):
for inputs, labels in train_data:
train_step(inputs, labels)
print("Finished epoch", epoch)
class DynamicRNN(tf.keras.Model):
    """Keras model that manually unrolls an RNN cell over a variable number of timesteps.

    AutoGraph converts the Python ``for i in tf.range(...)`` loop inside
    ``call`` into a graph while-loop, so the sequence length may differ
    between calls (only the feature dimension is fixed, at 3, by the
    ``input_signature``).
    """

    def __init__(self, rnn_cell):
        # NOTE(review): passing `self` positionally to super().__init__ looks
        # unintended -- tf.keras.Model.__init__ expects keyword arguments; verify.
        super(DynamicRNN, self).__init__(self)
        self.cell = rnn_cell

    @tf.function(input_signature=[tf.TensorSpec(dtype=tf.float32, shape=[None, None, 3])])
    def call(self, input_data):
        """Run the cell across time; return (outputs, final_state).

        ``input_data`` is a float32 tensor of shape [batch, time, 3];
        the returned outputs tensor has shape [batch, time, units].
        """
        # [batch, time, features] -> [time, batch, features]
        input_data = tf.transpose(input_data, [1, 0, 2])
        timesteps = tf.shape(input_data)[0]
        batch_size = tf.shape(input_data)[1]
        # TensorArray collects one output per timestep inside the graph loop.
        outputs = tf.TensorArray(tf.float32, timesteps)
        state = self.cell.get_initial_state(batch_size = batch_size, dtype=tf.float32)
        for i in tf.range(timesteps):
            output, state = self.cell(input_data[i], state)
            outputs = outputs.write(i, output)
        # Stack back to [batch, time, units] and also return the final cell state.
        return tf.transpose(outputs.stack(), [1, 0, 2]), state
lstm_cell = tf.keras.layers.LSTMCell(units = 13)
my_rnn = DynamicRNN(lstm_cell)
outputs, state = my_rnn(tf.random.normal(shape=[10,20,3]))
print(outputs.shape)
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
cce([[1, 0]], [[-1.0,3.0]]).numpy()
# Create the metrics
loss_metric = tf.keras.metrics.Mean(name='train_loss')
accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
@tf.function
def train_step(inputs, labels):
    """One optimization step that also accumulates the loss/accuracy metrics.

    Performs the same loss computation and optimizer update as the earlier
    train_step in this file, but additionally updates the module-level
    ``loss_metric`` and ``accuracy_metric`` so per-epoch results can be read
    (and reset) by the surrounding training loop.
    """
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        # Sum of per-layer regularization losses collected in model.losses.
        regularization_loss=tf.math.add_n(model.losses)
        pred_loss=loss_fn(labels, predictions)
        total_loss=pred_loss + regularization_loss
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Update the metrics
    loss_metric.update_state(total_loss)
    accuracy_metric.update_state(labels, predictions)
for epoch in range(NUM_EPOCHS):
# Reset the metrics
loss_metric.reset_states()
accuracy_metric.reset_states()
for inputs, labels in train_data:
train_step(inputs, labels)
# Get the metric results
mean_loss=loss_metric.result()
mean_accuracy = accuracy_metric.result()
print('Epoch: ', epoch)
print(' loss: {:.3f}'.format(mean_loss))
print(' accuracy: {:.3f}'.format(mean_accuracy))
model.compile(
optimizer = tf.keras.optimizers.Adam(0.001),
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics = ['acc', 'accuracy', tf.keras.metrics.SparseCategoricalAccuracy(name="my_accuracy")])
history = model.fit(train_data)
history.history.keys()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Effective Tensorflow 2
Step2: Recommendations for idiomatic TensorFlow 2
Step3: Then prepare the data for training
Step4: To keep the example short, trim the dataset to only return 5 batches
Step5: Use regular Python iteration to iterate over training data that fits in memory. Otherwise, tf.data.Dataset is the best way to stream training data from disk. Datasets are iterables (not iterators), and work just like other Python iterables in eager execution. You can fully utilize dataset async prefetching/streaming features by wrapping your code in tf.function, which replaces Python iteration with the equivalent graph operations using AutoGraph.
Step6: <a name="custom_loop"></a>
Step7: Take advantage of tf.function with Python control flow
Step8: Read the tf.function guide for a more information.
Step9: Use metrics to collect and display data
Step10: Keras metric names
|
10,305
|
<ASSISTANT_TASK:>
Python Code:
# Import some libraries that will be necessary for working with data and displaying plots
# To visualize plots in the notebook
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io # To read matlab files
import pylab
# For the student tests (only for python 2)
import sys
if sys.version_info.major==2:
from test_helper import Test
K = 35
n_grid = 200
frec = 3
std_n = 0.3
# Location of the training points
X_tr = (3 * np.random.random((K, 1)) - 0.5)
# Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(K, 1)
# Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
# Gererate random prediction curves
f1 = np.random.random() + np.random.random()*X_grid
f2 = np.random.random() + np.random.random()*X_grid + \
np.random.random()*(X_grid**2)
f3 = np.random.random() + np.random.random()*X_grid + \
np.random.random()*(X_grid**2) + np.random.random()*(X_grid**3)
plt.plot(X_tr,S_tr,'b.')
plt.plot(X_grid,f1.T,'g-',label='Arbitrary Linear function')
plt.plot(X_grid,f2.T,'r-',label='Arbitrary Quadratic function')
plt.plot(X_grid,f3.T,'m-',label='Arbitrary Cubic function')
plt.legend(loc='best')
plt.show()
## Next, we represent some random polynomial functions for degrees between 0 and 14
max_degree = 15
K = 200
#Values of X to evaluate the function
X_grid = np.linspace(-1.5, 1.5, K)
for idx in range(max_degree):
x1 = plt.subplot(3,5, idx+1)
x1.get_xaxis().set_ticks([])
x1.get_yaxis().set_ticks([])
for kk in range(5):
#Random generation of coefficients for the model
we = np.random.randn(idx+1, 1)
#Evaluate the polynomial with previous coefficients at X_grid values
fout = np.polyval(we, X_grid)
x1.plot(X_grid,fout,'g-')
x1.set_ylim([-5,5])
n_points = 20
n_grid = 200
frec = 3
std_n = 0.2
max_degree = 20
colors = 'brgcmyk'
#Location of the training points
X_tr = (3 * np.random.random((n_points,1)) - 0.5)
#Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
#We start by building the Z matrix
Z = []
for el in X_tr.tolist():
Z.append([el[0]**k for k in range(max_degree+1)])
Z = np.matrix(Z)
Z_grid = []
for el in X_grid.tolist():
Z_grid.append([el**k for k in range(max_degree+1)])
Z_grid = np.matrix(Z_grid)
plt.plot(X_tr,S_tr,'b.')
for k in [1, 2, n_points]: # range(max_degree+1):
Z_iter = Z[:,:k+1]
# Least square solution
#w_LS = (np.linalg.inv(Z_iter.T.dot(Z_iter))).dot(Z_iter.T).dot(S_tr)
# Least squares solution, with leass numerical errors
w_LS, resid, rank, s = np.linalg.lstsq(Z_iter, S_tr, rcond=None)
#estimates at all grid points
fout = Z_grid[:,:k+1].dot(w_LS)
fout = np.array(fout).flatten()
plt.plot(X_grid,fout,colors[k%len(colors)]+'-',label='Degree '+str(k))
plt.legend(loc='best')
plt.ylim(1.2*np.min(S_tr), 1.2*np.max(S_tr))
plt.show()
n_points = 35
n_test = 200
n_grid = 200
frec = 3
std_n = 0.7
max_degree = 25
colors = 'brgcmyk'
#Location of the training points
X_tr = (3 * np.random.random((n_points,1)) - 0.5)
#Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Test points to validate the generalization of the solution
X_tst = (3 * np.random.random((n_test,1)) - 0.5)
S_tst = np.cos(frec*X_tst) + std_n * np.random.randn(n_test,1)
#Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
#We start by building the Z matrix
def extend_matrix(X, max_degree):
    """Build the polynomial design matrix of *X* up to degree *max_degree*.

    Each input value x becomes the row [x**0, x**1, ..., x**max_degree],
    so the result is an ``np.matrix`` of shape (len(X), max_degree + 1).
    """
    # Flatten to a column so every element contributes exactly one row.
    X = X.reshape((X.shape[0], 1))
    rows = [[el[0] ** k for k in range(max_degree + 1)] for el in X.tolist()]
    return np.matrix(rows)
Z = extend_matrix(X_tr,max_degree)
Z_grid = extend_matrix(X_grid,max_degree)
Z_test = extend_matrix(X_tst,max_degree)
#Variables to store the train and test errors
tr_error = []
tst_error = []
for k in range(max_degree):
Z_iter = Z[:,:k+1]
#Least square solution
#w_LS = (np.linalg.inv(Z_iter.T.dot(Z_iter))).dot(Z_iter.T).dot(S_tr)
# Least squares solution, with leass numerical errors
w_LS, resid, rank, s = np.linalg.lstsq(Z_iter, S_tr)
#estimates at traint and test points
f_tr = Z_iter.dot(w_LS)
f_tst = Z_test[:,:k+1].dot(w_LS)
tr_error.append(np.array((S_tr-f_tr).T.dot(S_tr-f_tr)/len(S_tr))[0,0])
tst_error.append(np.array((S_tst-f_tst).T.dot(S_tst-f_tst)/len(S_tst))[0,0])
plt.stem(range(max_degree),tr_error,'b-',label='Train error')
plt.stem(range(max_degree),tst_error,'r-o',label='Test error')
plt.legend(loc='best')
plt.show()
n_points = 15
n_grid = 200
frec = 3
std_n = 0.2
n_val_16 = 5
degree = 18
X_tr = 3 * np.random.random((n_points,1)) - 0.5
S_tr = - np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
X_grid = np.linspace(-.5,2.5,n_grid)
S_grid = - np.cos(frec*X_grid) #Noise free for the true model
X_16 = .3 * np.ones((n_val_16,))
S_16 = np.linspace(np.min(S_tr),np.max(S_tr),n_val_16)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(X_tr,S_tr,'b.',markersize=10)
ax.plot(X_16,S_16,'ro',markersize=6)
ax.plot(X_grid,S_grid,'r-',label='True model')
for el in zip(X_16,S_16):
#Add point to the training set
X_tr_iter = np.append(X_tr,el[0])
S_tr_iter = np.append(S_tr,el[1])
#Obtain LS regression coefficients and evaluate it at X_grid
w_LS = np.polyfit(X_tr_iter, S_tr_iter, degree)
S_grid_iter = np.polyval(w_LS,X_grid)
ax.plot(X_grid,S_grid_iter,'g-')
ax.set_xlim(-.5,2.5)
ax.set_ylim(S_16[0]-2,S_16[-1]+2)
ax.legend(loc='best')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This notebook covers the problem of fitting parametric regression models with a minimum least-squares criterion. The material presented here is based on the first lectures of this <a href=http
Step2: 1.1. Parametric model
Step3: Should we choose a polynomial?
Step4: 2.4. Overfitting the training data
Step5: 2.4.1 Limitations of the LS approach. The need for assumptions
|
10,306
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scipy as sp
%matplotlib inline
T = 600 #[s]
fs = 500 #[Hz]
f0 = 0.04 #[Hz]
f1 = 10 #[Hz]
f2 = 11 #[Hz]
t = np.linspace(0,T,fs*T)
signal = 10*(np.cos(2*np.pi*f0*t))**8*np.sin(2*np.pi*f1*t) + \
10*np.exp(4*(t-T)/T)*np.cos(2*np.pi*f2*t)
noise = np.random.normal(0,0.3,T*fs)
signal = signal + noise
#PLOT
fig, ax = plt.subplots(nrows=2,ncols=1,figsize=(8,6))
ax[0].plot(t,signal)
ax[0].set_ylabel('Amplitude')
ax[1].plot(t,signal)
ax[1].set_ylabel('Amplitude')
ax[1].set_xlabel('Time (s)')
ax[1].set_xlim([295,305])
ax[1].set_ylim([-12,12])
fig.tight_layout()
fig, ax = plt.subplots(nrows=1,ncols=2,figsize=(11,4))
im1 = ax[0].specgram(signal,NFFT=1000,Fs=500,noverlap=500,interpolation='none')
ax[0].set_ylim([0,20])
ax[0].set_ylabel('Frequency (Hz)')
ax[0].set_xlabel('Time (s)')
im1[3].set_clim(-40,10)
im2 = ax[1].specgram(signal,NFFT=1000,Fs=500,noverlap=500,interpolation='none')
ax[1].set_xlim([250,350])
ax[1].set_ylim([7,13])
ax[1].set_xlabel('Time (s)')
fig.tight_layout()
im2[3].set_clim(-40,10)
cb = fig.colorbar(im2[3])
# Define Matrix F
numSamples = fs*T #num samples
W = 1000 #window size
K = W #frequency bands
N = numSamples//W #number of windows
F = np.zeros([W,K])
k = np.array(range(1,K//2+1))
l = np.array(range(1,W+1))
for jj in range(0,np.size(k)):
for ii in range(0,np.size(l)):
F[ii,jj] = np.cos(2*np.pi*l[ii]*(k[jj]-1)/K)
F[ii,jj+K//2] = np.sin(2*np.pi*l[ii]*(k[jj]-1)/K)
#plt.imshow(F)
#print(np.shape(F))
#initialize
Q = np.eye(K)*0.001
xKalman = np.zeros([K,N+1])
xPredict = np.zeros([K,N+1])
sigKalman = np.zeros([K,K,N+1])
sigPredict = np.zeros([K,K,N+1])
sigKalman[:,:,0] = np.eye(K)
#Kalman Filter
for n in range(0,N):
y = signal[n*W:(n+1)*W]
xPredict[:,n+1] = xKalman[:,n]
sigPredict[:,:,n+1] = sigKalman[:,:,n] + Q
gainK = np.dot(sigPredict[:,:,n+1],F.T).dot(np.linalg.inv(np.dot(F,sigPredict[:,:,n+1]).dot(F.T)+np.eye(K)))
xKalman[:,n+1] = xPredict[:,n+1] + np.dot(gainK,y-np.dot(F,xPredict[:,n+1]))
sigKalman[:,:,n+1] = sigPredict[:,:,n+1] - np.dot(gainK,F).dot(sigPredict[:,:,n+1])
#remove initial conditions
xKalman = xKalman[:,1:N+1]
xPredict = xPredict[:,1:N+1]
sigKalman = sigKalman[:,:,1:N+1]
sigPredict = sigPredict[:,:,1:N+1]
xEst = xKalman[0:K//2,:]-xKalman[K//2:W,:]*1j
xPSD = 10*np.log10(np.abs(xEst)**2)
fig, ax = plt.subplots(nrows=1,ncols=2,figsize=(11,4))
im1 = ax[0].imshow(xPSD,origin='lower',extent=[0,N*W//fs,0,fs//2-5],aspect='auto',interpolation='none')
ax[0].set_ylim([0,20])
ax[0].set_ylabel('Frequency (Hz)')
ax[0].set_xlabel('Time (s)')
im1.set_clim(-40,10)
im2 = ax[1].imshow(xPSD,origin='lower',extent=[0,N*W//fs,0,fs//2-5],aspect='auto',interpolation='none')
ax[1].set_xlim([250,350])
ax[1].set_ylim([7,13])
ax[1].set_xlabel('Time (s)')
fig.tight_layout()
im2.set_clim(-40,10)
cb = fig.colorbar(im2)
xSmooth = xKalman
sigSmooth = sigKalman
for n in range(N-2,-1,-1):
B = np.dot(sigKalman[:,:,n],np.linalg.inv(sigPredict[:,:,n+1]))
xSmooth[:,n] = xKalman[:,n] + np.dot(B,(xSmooth[:,n+1]-xPredict[:,n+1]))
sigSmooth[:,:,n] = sigKalman[:,:,n] + np.dot(B,(sigSmooth[:,:,n+1]-sigPredict[:,:,n+1])).dot(B.T)
xEst = xSmooth[0:K//2,:]-xSmooth[K//2:W,:]*1j
xPSD = 10*np.log10(np.abs(xEst)**2)
fig, ax = plt.subplots(nrows=1,ncols=2,figsize=(11,4))
im1 = ax[0].imshow(xPSD,origin='lower',extent=[0,N*W//fs,0,fs//2-5],aspect='auto',interpolation='none')
ax[0].set_ylim([0,20])
ax[0].set_ylabel('Frequency (Hz)')
ax[0].set_xlabel('Time (s)')
im1.set_clim(-40,10)
im2 = ax[1].imshow(xPSD,origin='lower',extent=[0,N*W//fs,0,fs//2-5],aspect='auto',interpolation='none')
ax[1].set_xlim([250,350])
ax[1].set_ylim([7,13])
ax[1].set_xlabel('Time (s)')
fig.tight_layout()
im2.set_clim(-40,10)
cb = fig.colorbar(im2)
#Parameters
alpha = 21000
tol = 0.005
maxIter = 10
Q = np.eye(K)*0.001
iter = 1
while (iter <= maxIter):
#Step 1:
#initialize
xKalman = np.zeros([K,N+1])
xPredict = np.zeros([K,N+1])
sigKalman = np.zeros([K,K,N+1])
sigPredict = np.zeros([K,K,N+1])
sigKalman[:,:,0] = np.eye(K)
#Kalman Filter
for n in range(0,N):
y = signal[n*W:(n+1)*W]
xPredict[:,n+1] = xKalman[:,n]
sigPredict[:,:,n+1] = sigKalman[:,:,n] + Q
gainK = np.dot(sigPredict[:,:,n+1],F.T).dot(np.linalg.inv(np.dot(F,sigPredict[:,:,n+1]).dot(F.T)+np.eye(K)))
xKalman[:,n+1] = xPredict[:,n+1] + np.dot(gainK,y-np.dot(F,xPredict[:,n+1]))
sigKalman[:,:,n+1] = sigPredict[:,:,n+1] - np.dot(gainK,F).dot(sigPredict[:,:,n+1])
#remove initial conditions
xKalman = xKalman[:,1:N+1]
xPredict = xPredict[:,1:N+1]
sigKalman = sigKalman[:,:,1:N+1]
sigPredict = sigPredict[:,:,1:N+1]
#Step 2:
#initialize
xSmooth = xKalman
sigSmooth = sigKalman
#Kalman Smoother
for n in range(N-2,-1,-1):
B = np.dot(sigKalman[:,:,n],np.linalg.inv(sigPredict[:,:,n+1]))
xSmooth[:,n] = xKalman[:,n] + np.dot(B,(xSmooth[:,n+1]-xPredict[:,n+1]))
sigSmooth[:,:,n] = sigKalman[:,:,n] + np.dot(B,(sigSmooth[:,:,n+1]-sigPredict[:,:,n+1])).dot(B.T)
#Step 4:
if iter > 1 and np.linalg.norm(xSmooth-xPrev,'fro')/np.linalg.norm(xPrev,'fro') < tol:
break
#Step 5: Update Q
Q = np.zeros([K,K])
for k in range(0,K):
qTemp = 0
for n in range(1,N):
qTemp += (xSmooth[k,n]-xSmooth[k,n-1])**2
Q[k,k] = (qTemp + np.finfo(float).eps**2)**(1/2)/alpha
xPrev = xSmooth
iter += 1
print(iter-1)
xEst = xSmooth[0:K//2,:]-xSmooth[K//2:W,:]*1j
xPSD = 10*np.log10(np.abs(xEst)**2)
fig, ax = plt.subplots(nrows=1,ncols=2,figsize=(11,4))
im1 = ax[0].imshow(xPSD,origin='lower',extent=[0,N*W//fs,0,fs//2-5],aspect='auto',interpolation='none')
ax[0].set_ylim([0,20])
ax[0].set_ylabel('Frequency (Hz)')
ax[0].set_xlabel('Time (s)')
im1.set_clim(-40,10)
im2 = ax[1].imshow(xPSD,origin='lower',extent=[0,N*W//fs,0,fs//2-5],aspect='auto',interpolation='none')
ax[1].set_xlim([250,350])
ax[1].set_ylim([7,13])
ax[1].set_xlabel('Time (s)')
fig.tight_layout()
im2.set_clim(-40,10)
cb = fig.colorbar(im2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Toy Example
Step2: The following is a spectrogram of the simulated signal that highlights the limitations of classical frequency analysis. The analysis is unable to resolve the closely spaced signals of 10 and 11 Hz in the frequency domain.
Step3: Robust Spectral Decomposition
Step4: The objective is to compute an estimate $\hat{x}$ of $x$ given the data $y$. The component-wise magnitude-squared of $\hat{x}$ gives an estimate of the magnitude spectrum of $y$. By treating $\left(x_{n}\right)_{n=1}^{N}$ as a sequence of random variables and carefully selecting a prior distribution, a stochastic continuity constraint can be established across time. By imposing a model on the components $\left(x_{n,k}\right)_{k=1}^{K}$ for each $n = 1,2,\dots,N$, sparsity is enforced in the frequency domain. The stochastic continuity constraint can be expressed in the form of the first-order difference equation
Step5: Kalman Smoother
Step6: IRLS Algorithm for Spectrotemporal Pursuit
|
10,307
|
<ASSISTANT_TASK:>
Python Code:
from halomod import TracerHaloModel
import numpy as np
from matplotlib import pyplot as plt
hm = TracerHaloModel(hod_model="Constant", transfer_model='EH')
hm.central_occupation
plt.plot(np.log10(hm.m),hm.satellite_occupation)
from halomod.concentration import CMRelation
from hmf.halos.mass_definitions import SOMean
class CMFlux(CMRelation):
    """Custom concentration-mass relation: c(m, z) = c_0 * (m / 1e11)**(1/3).

    Note: the returned concentration does not depend on redshift ``z``.
    """
    # Default model parameters; c_0 is the normalization (c = c_0 at m = 1e11).
    _defaults = {'c_0': 4}
    # Mass definition this relation is native to: spherical overdensity wrt mean.
    native_mdefs = (SOMean(),)

    def cm(self,m,z):
        """Return the halo concentration for mass m (presumably Msun/h -- TODO confirm units)."""
        # Cube-root growth with mass; z is accepted for interface compatibility but unused.
        return self.params['c_0']*(m*10**(-11))**(1/3)
hm = TracerHaloModel(
halo_concentration_model = CMFlux,
halo_profile_model = "PowerLawWithExpCut",
halo_profile_params = {"b":2.0,"a":1.0},
hod_model = "Constant",
transfer_model='EH',
)
plt.plot(np.log10(hm.k_hm),hm.tracer_profile.u(hm.k_hm,m=1e12), label='$m = 10^{12}$')
plt.plot(np.log10(hm.k_hm),hm.tracer_profile.u(hm.k_hm,m=1e13), label='$m = 10^{13}$')
plt.plot(np.log10(hm.k_hm),hm.tracer_profile.u(hm.k_hm,m=1e14), label='$m = 10^{14}$')
plt.legend();
rhoc = hm.cosmo.critical_density0.to("Msun/Mpc^3").value*hm.cosmo.h**2
hm.mean_tracer_den/rhoc
-np.log10(hm.mean_tracer_den/rhoc)
hm.hod_params = {"logA":-np.log10(hm.mean_tracer_den/rhoc)}
hm.mean_tracer_den/rhoc
rs = hm.halo_profile.scale_radius(1e11)
print(rs)
hm.halo_profile.scale_radius(1e12)
hm.halo_profile_params = {"a":rs/10}
plt.plot(hm.k_hm,hm.tracer_profile.u(hm.k_hm,m=1e12))
plt.xlabel("Scale [h/Mpc]")
plt.ylabel("Normalized Fourier Density")
plt.xscale('log')
plt.plot(hm.k_hm,hm.power_1h_auto_tracer, ls='--', color='C0', label='1halo')
plt.plot(hm.k_hm,hm.power_2h_auto_tracer, ls=':', color='C0', label='2halo')
plt.plot(hm.k_hm,hm.power_auto_tracer, color='C0', label='full')
plt.legend()
plt.xscale('log')
plt.yscale('log')
plt.ylim(1e-5,)
plt.xlabel("Fourier Scale, $k$")
plt.ylabel("Auto Power Spectrum")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Density Profile
Step2: One can see that indeed the density profile is now independant of halo mass
Step3: That means the parameter logA for the HOD should be changed to
Step4: We can simply set this on the existing model (everything that's dependent on it will be auto-updated)
Step5: The density profile should satisfy $r_s/a = \lambda_{\rm mfp}$. $r_s$ can be obtained as
Step6: Just to make sure, we calculate $r_s$ for a different halo mass
Step7: in the units of Mpc/h. Assume we want $\lambda_{\rm mfp} = 10$Mpc/h
Step8: Check the density profile to see the cut-off
Step9: You can see it's indeed around 0.1 Mpc$^{-1}$h
|
10,308
|
<ASSISTANT_TASK:>
Python Code:
particles = ["B+/-","D+/-","J/Psi"]
lifetimes = [1.64e-12,1.4e-12,7e-21]
c = 3e8 # m/s
v = c
for p,l in zip(particles,lifetimes):
distance = v*l
print "%-5s lifetime=%4.2e s distance traveled=%4.2e m" % (p,l,distance)
# Your code here
# Your code here
# Your code here
# Your code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3>Particles</h3>
Step2: The scale of many modern physics detectors ranges from the order of centimeters to 10's of meters. Given that information, what particles do you think will actually live long enough to travel <i>through</i> parts of the detector?
Step3: Which particles will decay (on average) before they reach the detectors? This means that these particles have to be reconstructed from their decay products.
Step4: Make a plot where the x-axis is the names of the above particles (or a number corresponding to each, where the number/particle relationship is clearly identified) and the y-axis is the lifetime of the particle. Color code the data points according to whether the primary decay is EM, weak, or strong. {\it Do not plot the proton and neutron on this plot}. Do you see the "pattern"? You may need to use a log-scale for the y-axis.
|
10,309
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
total_tosses = 30
num_heads = 24
prob_head = 0.5
#0 is tail. 1 is heads. Generate one experiment
experiment = np.random.randint(0,2,total_tosses)
print("Data of the Experiment:", experiment)
#Find the number of heads
print("Heads in the Experiment:", experiment[experiment==1]) #This will give all the heads in the array
head_count = experiment[experiment==1].shape[0] #This will get the count of heads in the array
print("Number of heads in the experiment:", head_count)
#Now, the above experiment needs to be repeated 100 times. Let's write a function and put the above code in a loop
def coin_toss_experiment(times_to_repeat, tosses_per_experiment=30):
    """Repeat a fair-coin experiment and count heads in each repetition.

    Parameters
    ----------
    times_to_repeat : int
        Number of independent experiments to run.
    tosses_per_experiment : int, optional
        Coin tosses in a single experiment. Defaults to 30, matching the
        module-level `total_tosses` the original version read implicitly;
        parameterizing it removes the hidden global dependency.

    Returns
    -------
    numpy.ndarray of shape (times_to_repeat, 1)
        Number of heads observed in each experiment.
    """
    head_count = np.empty([times_to_repeat, 1], dtype=int)
    for trial in range(times_to_repeat):
        # 0 is tails, 1 is heads, so the head count is simply the sum.
        experiment = np.random.randint(0, 2, tosses_per_experiment)
        head_count[trial] = experiment.sum()
    return head_count
head_count = coin_toss_experiment(100)
head_count[:10]
print("Dimensions:", head_count.shape, "\n","Type of object:", type(head_count))
#Let's plot the above distribution
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set(color_codes = True)
sns.distplot(head_count, kde=False)
sns.distplot(head_count, kde=True)
#Number of times the experiment returned 24 heads.
head_count[head_count>=24]
print("No of times experiment returned 24 heads or more:", head_count[head_count>=24].shape[0])
print("% of times with 24 or more heads: ", head_count[head_count>=24].shape[0]/float(head_count.shape[0])*100)
def coin_toss_experiment_2(times_to_repeat, tosses_per_experiment=30):
    """Vectorised version of coin_toss_experiment (single randint call).

    Draws the whole (experiments x tosses) 0/1 matrix at once; summing each
    row gives that experiment's head count. Removes the unused `head_count`
    pre-allocation of the original and replaces the implicit `total_tosses`
    global with a defaulted parameter.

    Returns
    -------
    numpy.ndarray of shape (times_to_repeat,)
        Head count per experiment (note: 1-D, unlike coin_toss_experiment).
    """
    experiment = np.random.randint(0, 2, [times_to_repeat, tosses_per_experiment])
    return experiment.sum(axis=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise
Step2: Exercise
|
10,310
|
<ASSISTANT_TASK:>
Python Code:
# Import
from __future__ import division
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (20.0, 10.0)
inline_rc = dict(mpl.rcParams)
from classification_utilities import make_facies_log_plot
import pandas as pd
import numpy as np
#import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import LeavePGroupsOut
from sklearn.metrics import f1_score
from sklearn.multiclass import OneVsOneClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from scipy.signal import medfilt
import sys, scipy, sklearn
print('Python: ' + sys.version.split('\n')[0])
print(' ' + sys.version.split('\n')[1])
print('Pandas: ' + pd.__version__)
print('Numpy: ' + np.__version__)
print('Scipy: ' + scipy.__version__)
print('Sklearn: ' + sklearn.__version__)
# Parameters
feature_names = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'NM_M', 'RELPOS']
facies_names = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
# Load data from file
data = pd.read_csv('../facies_vectors.csv')
# Store features and labels
X = data[feature_names].values # features
y = data['Facies'].values # labels
# Store well labels and depths
well = data['Well Name'].values
depth = data['Depth'].values
# Define function for plotting feature statistics
def plot_feature_stats(X, y, feature_names, facies_colors, facies_names):
    """Pair-plot every feature against every other, colour-coded by facies.

    NaN-containing rows are dropped first so seaborn's pairplot does not
    fail on missing values. Produces a seaborn figure as a side effect;
    returns nothing.
    """
    # Remove NaN: drop any sample with at least one missing feature.
    nan_idx = np.any(np.isnan(X), axis=1)
    X = X[np.logical_not(nan_idx), :]
    y = y[np.logical_not(nan_idx)]
    # Merge features and labels into a single DataFrame
    features = pd.DataFrame(X, columns=feature_names)
    labels = pd.DataFrame(y, columns=['Facies'])
    for f_idx, facies in enumerate(facies_names):
        # NOTE(review): facies labels in `y` appear to be 1-based (1..9)
        # elsewhere in this notebook while enumerate() is 0-based, and the
        # comparison is applied to the whole DataFrame — confirm this
        # mapping replaces the intended rows.
        labels[labels[:] == f_idx] = facies
    data = pd.concat((labels, features), axis=1)
    # Plot features statistics: fixed colour per facies, reversed hue order
    # so the legend matches the stratigraphic ordering used elsewhere.
    facies_color_map = {}
    for ind, label in enumerate(facies_names):
        facies_color_map[label] = facies_colors[ind]
    sns.pairplot(data, hue='Facies', palette=facies_color_map, hue_order=list(reversed(facies_names)))
# Feature distribution
# plot_feature_stats(X, y, feature_names, facies_colors, facies_names)
# mpl.rcParams.update(inline_rc)
# Facies per well
for w_idx, w in enumerate(np.unique(well)):
ax = plt.subplot(3, 4, w_idx+1)
hist = np.histogram(y[well == w], bins=np.arange(len(facies_names)+1)+.5)
plt.bar(np.arange(len(hist[0])), hist[0], color=facies_colors, align='center')
ax.set_xticks(np.arange(len(hist[0])))
ax.set_xticklabels(facies_names)
ax.set_title(w)
# Features per well
for w_idx, w in enumerate(np.unique(well)):
ax = plt.subplot(3, 4, w_idx+1)
hist = np.logical_not(np.any(np.isnan(X[well == w, :]), axis=0))
plt.bar(np.arange(len(hist)), hist, color=facies_colors, align='center')
ax.set_xticks(np.arange(len(hist)))
ax.set_xticklabels(feature_names)
ax.set_yticks([0, 1])
ax.set_yticklabels(['miss', 'hit'])
ax.set_title(w)
def make_pe(X, seed):
    """Impute missing PE (photoelectric effect) values via random-forest regression.

    A regressor is trained on the rows where every feature (including PE)
    is present, then used to predict PE for the rows where it is missing.

    NOTE(review): the regressor is trained on the module-level `data`
    DataFrame, not on X, and X is modified in place (column index 4 is
    assumed to be PE) — confirm callers expect both side effects.
    """
    reg = RandomForestRegressor(max_features='sqrt', n_estimators=50, random_state=seed)
    DataImpAll = data[feature_names].copy()
    # Keep only complete rows for fitting the imputation model.
    DataImp = DataImpAll.dropna(axis = 0, inplace=False)
    Ximp=DataImp.loc[:, DataImp.columns != 'PE']
    Yimp=DataImp.loc[:, 'PE']
    reg.fit(Ximp, Yimp)
    # Fill the NaN PE entries of X (column 4) with the regressor's predictions.
    X[np.array(DataImpAll.PE.isnull()),4] = reg.predict(DataImpAll.loc[DataImpAll.PE.isnull(),:].drop('PE',axis=1,inplace=False))
    return X
# Feature windows concatenation function
def augment_features_window(X, N_neig):
    """Concatenate each row of X with its N_neig neighbours above and below.

    The matrix is zero-padded at both ends so edge rows still get a full
    window. The output keeps X's row count and has
    (2*N_neig + 1) * n_features columns.

    Parameters
    ----------
    X : ndarray (n_rows, n_features)
    N_neig : int, window radius (number of neighbours on each side).

    Returns
    -------
    ndarray (n_rows, n_features * (2*N_neig + 1))
    """
    n_rows, n_feat = X.shape

    # Zero padding above and below so every row has a complete window.
    X_pad = np.vstack((np.zeros((N_neig, n_feat)), X, np.zeros((N_neig, n_feat))))

    # Each output row is the flattened (2*N_neig+1) x n_feat window. Slicing
    # the padded array replaces the original per-neighbour hstack loop,
    # which rebuilt each row element by element.
    win = 2 * N_neig + 1
    X_aug = np.zeros((n_rows, n_feat * win))
    for r in range(n_rows):
        X_aug[r] = X_pad[r:r + win].ravel()
    return X_aug
# Feature gradient computation function
def augment_features_gradient(X, depth):
    """Finite-difference gradient of each feature column with respect to depth.

    The last sample has no forward neighbour, so its gradient is set to
    zero, keeping the output the same shape as X.
    """
    depth_step = np.diff(depth).reshape((-1, 1))
    # Guard against repeated depth values, which would divide by zero.
    depth_step[depth_step == 0] = 0.001

    gradient = np.diff(X, axis=0) / depth_step

    # Append a zero row to compensate for the sample consumed by np.diff.
    zero_row = np.zeros((1, gradient.shape[1]))
    return np.concatenate((gradient, zero_row))
# Feature augmentation function
def augment_features(X, well, depth, seed=None, pe=True, N_neig=1):
    """Build the augmented feature matrix: neighbour windows plus depth gradients.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Raw feature matrix; the PE column may contain NaNs.
    well : ndarray
        Well label per sample; augmentation never crosses well boundaries.
    depth : ndarray
        Measured depth per sample.
    seed : int or None
        Random state forwarded to the PE imputation regressor.
    pe : bool
        If True, impute missing PE values first (see make_pe).
    N_neig : int
        Neighbour window radius used by augment_features_window.

    Returns
    -------
    (X_aug, padded_rows)
        X_aug has (2*N_neig + 2) * n_features columns; padded_rows indexes
        rows whose window was zero-padded (candidates to drop in training).
    """
    # BUG FIX: the original `seed = seed or None` turned seed=0 into None,
    # making the first realization of the seed loop non-reproducible.
    # The seed is now forwarded unchanged.
    if pe:
        X = make_pe(X, seed)

    # Augment features well by well so windows/gradients stay within a well.
    X_aug = np.zeros((X.shape[0], X.shape[1]*(N_neig*2+2)))
    for w in np.unique(well):
        w_idx = np.where(well == w)[0]
        X_aug_win = augment_features_window(X[w_idx, :], N_neig)
        X_aug_grad = augment_features_gradient(X[w_idx, :], depth[w_idx])
        X_aug[w_idx, :] = np.concatenate((X_aug_win, X_aug_grad), axis=1)

    # Rows whose first 7 augmented columns are all zero came from padding.
    padded_rows = np.unique(np.where(X_aug[:, 0:7] == np.zeros((1, 7)))[0])
    return X_aug, padded_rows
# Augment features
X_aug, padded_rows = augment_features(X, well, depth)
# Initialize model selection methods
lpgo = LeavePGroupsOut(2)
# Generate splits
split_list = []
for train, val in lpgo.split(X, y, groups=data['Well Name']):
hist_tr = np.histogram(y[train], bins=np.arange(len(facies_names)+1)+.5)
hist_val = np.histogram(y[val], bins=np.arange(len(facies_names)+1)+.5)
if np.all(hist_tr[0] != 0) & np.all(hist_val[0] != 0):
split_list.append({'train':train, 'val':val})
# Print splits
for s, split in enumerate(split_list):
print('Split %d' % s)
print(' training: %s' % (data['Well Name'][split['train']].unique()))
print(' validation: %s' % (data['Well Name'][split['val']].unique()))
# Parameters search grid (uncomment parameters for full grid search... may take a lot of time)
N_grid = [100] # [50, 100, 150]
M_grid = [10] # [5, 10, 15]
S_grid = [25] # [10, 25, 50, 75]
L_grid = [5] # [2, 3, 4, 5, 10, 25]
param_grid = []
for N in N_grid:
for M in M_grid:
for S in S_grid:
for L in L_grid:
param_grid.append({'N':N, 'M':M, 'S':S, 'L':L})
# Train and test a classifier
def train_and_test(X_tr, y_tr, X_v, well_v, clf):
    """Fit `clf` on the training split and predict facies for the validation split.

    Parameters
    ----------
    X_tr, y_tr : training features and labels.
    X_v : validation features.
    well_v : well label per validation sample; predictions are smoothed
        within each well independently.
    clf : estimator exposing fit/predict (e.g. an sklearn classifier).

    Returns
    -------
    ndarray of predicted facies labels for X_v, median-filtered per well.
    """
    # Feature normalization: the scaler is fit on training data only, then
    # applied to both splits, so no validation statistics leak into training.
    scaler = preprocessing.RobustScaler(quantile_range=(25.0, 75.0)).fit(X_tr)
    X_tr = scaler.transform(X_tr)
    X_v = scaler.transform(X_v)
    # Train classifier
    clf.fit(X_tr, y_tr)
    # Test classifier
    y_v_hat = clf.predict(X_v)
    # Clean isolated facies for each well: a width-5 median filter suppresses
    # single-sample label flips, exploiting vertical facies continuity.
    # NOTE(review): assumes samples within a well are ordered by depth —
    # confirm against the data-loading code.
    for w in np.unique(well_v):
        y_v_hat[well_v==w] = medfilt(y_v_hat[well_v==w], kernel_size=5)
    return y_v_hat
# For each set of parameters
# score_param = []
# for param in param_grid:
# # For each data split
# score_split = []
# for split in split_list:
# # Remove padded rows
# split_train_no_pad = np.setdiff1d(split['train'], padded_rows)
# # Select training and validation data from current split
# X_tr = X_aug[split_train_no_pad, :]
# X_v = X_aug[split['val'], :]
# y_tr = y[split_train_no_pad]
# y_v = y[split['val']]
# # Select well labels for validation data
# well_v = well[split['val']]
# # Train and test
# y_v_hat = train_and_test(X_tr, y_tr, X_v, well_v, param)
# # Score
# score = f1_score(y_v, y_v_hat, average='micro')
# score_split.append(score)
# # Average score for this param
# score_param.append(np.mean(score_split))
# print('F1 score = %.3f %s' % (score_param[-1], param))
# # Best set of parameters
# best_idx = np.argmax(score_param)
# param_best = param_grid[best_idx]
# score_best = score_param[best_idx]
# print('\nBest F1 score = %.3f %s' % (score_best, param_best))
param_best = {'S': 25, 'M': 10, 'L': 5, 'N': 100}
# Load data from file
test_data = pd.read_csv('../validation_data_nofacies.csv')
# Prepare test data
well_ts = test_data['Well Name'].values
depth_ts = test_data['Depth'].values
X_ts = test_data[feature_names].values
y_pred = []
print('o' * 100)
for seed in range(100):
np.random.seed(seed)
# Make training data.
X_train, padded_rows = augment_features(X, well, depth, seed=seed)
y_train = y
X_train = np.delete(X_train, padded_rows, axis=0)
y_train = np.delete(y_train, padded_rows, axis=0)
param = param_best
clf = OneVsOneClassifier(RandomForestClassifier(n_estimators=param['N'], criterion='entropy',
max_features=param['M'], min_samples_split=param['S'], min_samples_leaf=param['L'],
class_weight='balanced', random_state=seed), n_jobs=-1)
# Make blind data.
X_test, _ = augment_features(X_ts, well_ts, depth_ts, seed=seed, pe=False)
# Train and test.
y_ts_hat = train_and_test(X_train, y_train, X_test, well_ts, clf)
# Collect result.
y_pred.append(y_ts_hat)
print('.', end='')
np.save('100_realizations.npy', y_pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Data inspection
Step3: Feature imputation
Step4: Feature augmentation
Step5: Generate training, validation and test data splits
Step6: Classification parameters optimization
Step7: Predict labels on test data
|
10,311
|
<ASSISTANT_TASK:>
Python Code:
# Učitaj osnovne biblioteke...
import sklearn
import mlutils
import numpy as np
import matplotlib.pyplot as plt
%pylab inline
from collections import Counter
class VotingClassifierDIY(object):
    """Hand-rolled voting ensemble over a list of fitted base classifiers.

    Supports two schemes:
      - "counting": majority vote over the base classifiers' hard predictions;
      - "averaging": average the base classifiers' predict_proba outputs and
        pick the class with the highest mean probability.

    The original template left all methods as `pass` stubs; this fills them
    in without changing the public interface.
    """
    SCHEME_COUNTING = "counting"
    SCHEME_AVERAGING = "averaging"

    def __init__(self, clfs, voting_scheme=SCHEME_COUNTING):
        self.clfs = clfs
        self.voting_scheme = voting_scheme

    def fit(self, X, y):
        """Fit every base classifier on (X, y); returns self for chaining."""
        # Remember the sorted class labels so averaging can map the argmax
        # column index back to an actual label.
        self.classes_ = np.unique(y)
        for clf in self.clfs:
            clf.fit(X, y)
        return self

    def predict_proba(self, X):
        """Mean of the base classifiers' class-probability matrices."""
        return np.mean([clf.predict_proba(X) for clf in self.clfs], axis=0)

    def predict(self, X):
        """Predict a label per sample using the configured voting scheme."""
        if self.voting_scheme == self.SCHEME_COUNTING:
            # Majority vote: one column of `votes` per sample.
            votes = np.array([clf.predict(X) for clf in self.clfs])
            return np.array([Counter(col).most_common(1)[0][0] for col in votes.T])
        # Averaging: highest mean probability wins.
        avg = self.predict_proba(X)
        return self.classes_[np.argmax(avg, axis=1)]
from sklearn.datasets import make_classification
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
X_voting, y_voting = make_classification(n_samples=1000, n_features=4, n_redundant=0, n_informative=3, n_classes=3, n_clusters_per_class=2)
# Vaš kôd ovdje
from sklearn.model_selection import train_test_split
X_bag, y_bag = make_classification(n_samples=1000, n_features=20, n_redundant=1, n_informative=17, n_classes=3, n_clusters_per_class=2)
X_bag_train, X_bag_test, y_bag_train, y_bag_test = train_test_split(X_bag, y_bag, train_size=0.7, random_state=69)
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
# Vaš kôd ovdje
from sklearn.ensemble import RandomForestClassifier
# Vaš kôd ovdje
from sklearn.datasets import make_circles
circ_X, circ_y = make_circles(n_samples=400, noise=0.1, factor=0.4)
mlutils.plot_2d_clf_problem(circ_X, circ_y)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
# Vaš kôd ovdje
from sklearn.model_selection import train_test_split
X_boost, y_boost = make_classification(n_samples=1000, n_features=20, n_redundant=0, n_informative=18, n_classes=3, n_clusters_per_class=1)
X_boost_train, X_boost_test, y_boost_train, y_boost_test = train_test_split(X_boost, y_boost, train_size=0.7, random_state=69)
# Vaš kôd ovdje
# Vaš kôd ovdje
# Vaš kôd ovdje
# Vaš kôd ovdje
# Vaš kôd ovdje
from scipy.stats import beta
# Vaš kôd ovdje
# Vaš kôd ovdje
# Vaš kôd ovdje
from sklearn.datasets import load_iris
import itertools as it
# Vaš kôd ovdje
from scipy.stats import norm
# Vaš kôd ovdje
# Vaš kôd ovdje
from scipy.stats import pearsonr
# Vaš kôd ovdje
# Vaš kôd ovdje
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Ansambli (glasovanje)
Step2: (b)
Step3: Q
Step4: Razred koji implementira stablo odluke jest tree.DecisionTreeClassifier. Prvo naučite stablo odluke (engl. decision tree) na skupu za učenje, ali tako da je taj model presložen. To možete postići tako da povećate najveću moguću dubinu stabla (parametar max_depth). Ispišite pogrešku na skupu za ispitivanje (pogrešku 0-1; pogledajte paket metrics).
Step5: Sada isprobajte algoritam slučajnih šuma (dostupan u razredu ensemble.RandomForestClassifier) za različit broj stabala $L \in [1, 30]$. Iscrtajte pogrešku na skupu za učenje i na skupu za ispitivanje u ovisnosti o tom hiperparametru. Ispišite najmanju pogrešku na skupu za ispitivanje.
Step6: Q
Step7: (a)
Step8: Q
Step9: Iscrtajte krivulje pogrešaka na skupu za učenje i ispitivanje u ovisnosti o hiperparametru $L \in [1,80]$. Koristite pogrešku 0-1 iz paketa metrics. Ispišite najmanju ostvarenu pogrešku na skupu za ispitivanje, te pripadajuću vrijednost hiperparametra $L$.
Step10: Q
Step11: Q
Step12: (b)
Step13: Q
Step14: Q
Step15: Q
Step16: Q
Step17: (g)
Step18: (h)
Step19: (i)
Step20: Q
Step21: (k)
|
10,312
|
<ASSISTANT_TASK:>
Python Code:
PROJECT = "cloud-training-demos" # Replace with your PROJECT
BUCKET = "cloud-training-bucket" # Replace with your BUCKET
REGION = "us-central1" # Choose an available region for Cloud MLE
TFVERSION = "1.14" # TF version for CMLE to use
import os
os.environ["BUCKET"] = BUCKET
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = TFVERSION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
# Create SQL query using natality data after the year 2000
query_string =
WITH
CTE_hash_cols_fixed AS (
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
year,
month,
CASE
WHEN day IS NULL AND wday IS NULL THEN 0
ELSE
CASE
WHEN day IS NULL THEN wday
ELSE
wday
END
END
AS date,
IFNULL(state,
"Unknown") AS state,
IFNULL(mother_birth_state,
"Unknown") AS mother_birth_state
FROM
publicdata.samples.natality
WHERE
year > 2000)
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(year AS STRING), CAST(month AS STRING), CAST(date AS STRING), CAST(state AS STRING), CAST(mother_birth_state AS STRING))) AS hashvalues
FROM
CTE_hash_cols_fixed
from google.cloud import bigquery
bq = bigquery.Client(project = PROJECT)
df = bq.query("SELECT hashvalues, COUNT(weight_pounds) AS num_babies FROM ("
+ query_string +
") GROUP BY hashvalues").to_dataframe()
print("There are {} unique hashvalues.".format(len(df)))
df.head()
sampling_percentages_query =
WITH
-- Get label, features, and column that we are going to use to split into buckets on
CTE_hash_cols_fixed AS (
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
year,
month,
CASE
WHEN day IS NULL AND wday IS NULL THEN 0
ELSE
CASE
WHEN day IS NULL THEN wday
ELSE
wday
END
END
AS date,
IFNULL(state,
"Unknown") AS state,
IFNULL(mother_birth_state,
"Unknown") AS mother_birth_state
FROM
publicdata.samples.natality
WHERE
year > 2000),
CTE_data AS (
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(year AS STRING), CAST(month AS STRING), CAST(date AS STRING), CAST(state AS STRING), CAST(mother_birth_state AS STRING))) AS hashvalues
FROM
CTE_hash_cols_fixed),
-- Get the counts of each of the unique hashs of our splitting column
CTE_first_bucketing AS (
SELECT
hashvalues,
COUNT(*) AS num_records
FROM
CTE_data
GROUP BY
hashvalues ),
-- Get the number of records in each of the hash buckets
CTE_second_bucketing AS (
SELECT
ABS(MOD(hashvalues, {0})) AS bucket_index,
SUM(num_records) AS num_records
FROM
CTE_first_bucketing
GROUP BY
ABS(MOD(hashvalues, {0}))),
-- Calculate the overall percentages
CTE_percentages AS (
SELECT
bucket_index,
num_records,
CAST(num_records AS FLOAT64) / (
SELECT
SUM(num_records)
FROM
CTE_second_bucketing) AS percent_records
FROM
CTE_second_bucketing ),
-- Choose which of the hash buckets will be used for training and pull in their statistics
CTE_train AS (
SELECT
*,
"train" AS dataset_name
FROM
CTE_percentages
WHERE
bucket_index >= 0
AND bucket_index < {1}),
-- Choose which of the hash buckets will be used for validation and pull in their statistics
CTE_eval AS (
SELECT
*,
"eval" AS dataset_name
FROM
CTE_percentages
WHERE
bucket_index >= {1}
AND bucket_index < {2}),
-- Choose which of the hash buckets will be used for testing and pull in their statistics
CTE_test AS (
SELECT
*,
"test" AS dataset_name
FROM
CTE_percentages
WHERE
bucket_index >= {2}
AND bucket_index < {0}),
-- Union the training, validation, and testing dataset statistics
CTE_union AS (
SELECT
0 AS dataset_id,
*
FROM
CTE_train
UNION ALL
SELECT
1 AS dataset_id,
*
FROM
CTE_eval
UNION ALL
SELECT
2 AS dataset_id,
*
FROM
CTE_test ),
-- Show final splitting and associated statistics
CTE_split AS (
SELECT
dataset_id,
dataset_name,
SUM(num_records) AS num_records,
SUM(percent_records) AS percent_records
FROM
CTE_union
GROUP BY
dataset_id,
dataset_name )
SELECT
*
FROM
CTE_split
ORDER BY
dataset_id
modulo_divisor = 100
train_percent = 80.0
eval_percent = 10.0
train_buckets = int(modulo_divisor * train_percent / 100.0)
eval_buckets = int(modulo_divisor * eval_percent / 100.0)
df = bq.query(sampling_percentages_query.format(modulo_divisor, train_buckets, train_buckets + eval_buckets)).to_dataframe()
df.head()
# Added every_n so that we can now subsample from each of the hash values to get approximately the record counts we want
every_n = 500
train_query = "SELECT * FROM ({0}) WHERE ABS(MOD(hashvalues, {1} * 100)) < 80".format(query_string, every_n)
eval_query = "SELECT * FROM ({0}) WHERE ABS(MOD(hashvalues, {1} * 100)) >= 80 AND ABS(MOD(hashvalues, {1} * 100)) < 90".format(query_string, every_n)
test_query = "SELECT * FROM ({0}) WHERE ABS(MOD(hashvalues, {1} * 100)) >= 90 AND ABS(MOD(hashvalues, {1} * 100)) < 100".format(query_string, every_n)
train_df = bq.query(train_query).to_dataframe()
eval_df = bq.query(eval_query).to_dataframe()
test_df = bq.query(test_query).to_dataframe()
print("There are {} examples in the train dataset.".format(len(train_df)))
print("There are {} examples in the validation dataset.".format(len(eval_df)))
print("There are {} examples in the test dataset.".format(len(test_df)))
train_df.head()
train_df.describe()
import pandas as pd
def preprocess(df):
# Clean up data
# Remove what we don"t want to use for training
df = df[df.weight_pounds > 0]
df = df[df.mother_age > 0]
df = df[df.gestation_weeks > 0]
df = df[df.plurality > 0]
# Modify plurality field to be a string
twins_etc = dict(zip([1,2,3,4,5],
["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)"]))
df["plurality"].replace(twins_etc, inplace = True)
# Now create extra rows to simulate lack of ultrasound
no_ultrasound = df.copy(deep = True)
no_ultrasound.loc[no_ultrasound["plurality"] != "Single(1)", "plurality"] = "Multiple(2+)"
no_ultrasound["is_male"] = "Unknown"
# Concatenate both datasets together and shuffle
return pd.concat([df, no_ultrasound]).sample(frac=1).reset_index(drop=True)
train_df = preprocess(train_df)
eval_df = preprocess(eval_df)
test_df = preprocess(test_df)
train_df.head()
train_df.tail()
train_df.describe()
columns = "weight_pounds,is_male,mother_age,plurality,gestation_weeks".split(',')
train_df.to_csv(path_or_buf = "train.csv", columns = columns, header = False, index = False)
eval_df.to_csv(path_or_buf = "eval.csv", columns = columns, header = False, index = False)
test_df.to_csv(path_or_buf = "test.csv", columns = columns, header = False, index = False)
%%bash
wc -l *.csv
%%bash
head *.csv
%%bash
tail *.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Create ML datasets by sampling using BigQuery
Step3: There are only a limited number of years, months, days, and states in the dataset. Let's see what the hash values are.
Step5: We can make a query to check if our bucketing values result in the correct sizes of each of our dataset splits and then adjust accordingly
Step6: Here's a way to get a well-distributed portion of the data in such a way that the train/eval/test sets do not overlap.
Step7: Preprocess data using Pandas
Step8: Also, notice that there are some very important numeric fields that are missing in some rows (the count in Pandas doesn't count missing data)
Step9: It is always crucial to clean raw data before using in machine learning, so we have a preprocessing step. We'll define a preprocess function below. Note that the mother's age is an input to our model so users will have to provide the mother's age; otherwise, our service won't work. The features we use for our model were chosen because they are such good predictors and because they are easy enough to collect.
Step10: Let's process the train/eval/test set and see a small sample of the training data after our preprocessing
Step11: Let's look again at a summary of the dataset. Note that we only see numeric columns, so plurality does not show up.
Step12: Write to .csv files
|
10,313
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
# pandas is a software library for data manipulation and analysis
# We commonly use shorter nicknames for certain packages. Pandas is often abbreviated to pd.
# hit shift + enter to run this cell or block of code
path = r'./chopstick-effectiveness.csv'
# Change the path to the location where the chopstick-effectiveness.csv file is located on your computer.
# If you get an error when running this block of code, be sure the chopstick-effectiveness.csv is located at the path on your computer.
dataFrame = pd.read_csv(path)
dataFrame
dataFrame['Food.Pinching.Efficiency'].mean()
meansByChopstickLength = dataFrame.groupby('Chopstick.Length')['Food.Pinching.Efficiency'].mean().reset_index()
meansByChopstickLength
# reset_index() changes Chopstick.Length from an index to column. Instead of the index being the length of the chopsticks, the index is the row numbers 0, 1, 2, 3, 4, 5.
# Causes plots to display within the notebook rather than in a new window
%pylab inline
import matplotlib.pyplot as plt
plt.scatter(x=meansByChopstickLength['Chopstick.Length'], y=meansByChopstickLength['Food.Pinching.Efficiency'])
# title="")
plt.xlabel("Length in mm")
plt.ylabel("Efficiency in PPPC")
plt.title("Average Food Pinching Efficiency by Chopstick Length")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's do a basic statistical calculation on the data using code! Run the block of code below to calculate the average "Food Pinching Efficiency" for all 31 participants and all chopstick lengths.
    Step2: This number is helpful, but the number doesn't let us know which of the chopstick lengths performed best for the thirty-one male junior college students. Let's break down the data by chopstick length. The next block of code will generate the average "Food Pinching Efficiency" for each chopstick length. Run the block of code below.
Step3: 5. Which chopstick length performed the best for the group of thirty-one male junior college students?
|
10,314
|
<ASSISTANT_TASK:>
Python Code:
# loading libraries and reading the data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# set seaborn theme if you prefer
sns.set(style="white")
# read data
market_df = pd.read_csv("./global_sales_data/market_fact.csv")
customer_df = pd.read_csv("./global_sales_data/cust_dimen.csv")
product_df = pd.read_csv("./global_sales_data/prod_dimen.csv")
shipping_df = pd.read_csv("./global_sales_data/shipping_dimen.csv")
orders_df = pd.read_csv("./global_sales_data/orders_dimen.csv")
market_df.head()
# merging with the Orders data to get the Date column
df = pd.merge(market_df, orders_df, how='inner', on='Ord_id')
df.head()
# Now we have the Order_Date in the df
# It is stored as a string (object) currently
df.info()
# Convert Order_Date to datetime type
df['Order_Date'] = pd.to_datetime(df['Order_Date'])
# Order_Date is now datetime type
df.info()
# aggregating total sales on each day
time_df = df.groupby('Order_Date')['Sales'].sum()
print(time_df.head())
print(type(time_df))
# time series plot
# figure size
plt.figure(figsize=(16, 8))
# tsplot
sns.tsplot(data=time_df)
plt.show()
# extracting month and year from date
# extract month
df['month'] = df['Order_Date'].dt.month
# extract year
df['year'] = df['Order_Date'].dt.year
df.head()
# grouping by year and month
df_time = df.groupby(["year", "month"]).Sales.mean()
df_time.head()
plt.figure(figsize=(8, 6))
# time series plot
sns.tsplot(df_time)
plt.xlabel("Time")
plt.ylabel("Sales")
plt.show()
# Pivoting the data using 'month'
year_month = pd.pivot_table(df, values='Sales', index='year', columns='month', aggfunc='mean')
year_month.head()
# figure size
plt.figure(figsize=(12, 8))
# heatmap with a color map of choice
sns.heatmap(year_month, cmap="YlGnBu")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualising Simple Time Series Data
Step2: Since the Order_Date variable is in the orders dataframe, let's merge it.
Step3: Since Order_Date is a string, we need to convert it into a datetime object. You can do that using pd.to_datetime().
Step4: Now, since on each day, multiple orders were placed, we need to aggregate Sales using a metric such as mean, median etc., and then create a time series plot.
Step5: We can now create a time-series plot using sns.tsplot().
Step6: Using Derived Date Metrics for Visualisation
Step7: Now you can plot the average sales across years and months.
Step8: There is another way to visualise numeric variables, such as Sales, across the year and month. We can pivot the month column to create a wide-format dataframe, and then plot a heatmap.
Step9: You can now create a heatmap using sns.heatmap().
|
10,315
|
<ASSISTANT_TASK:>
Python Code:
import autofig
import numpy as np
import phoebe # PHOEBE 2.1.0
#autofig.inline()
times = np.linspace(0,1,21)
# create an "interesting" system in PHOEBE
b = phoebe.default_binary()
b.set_value('incl@orbit', 75)
b.set_value('q', 0.5)
b.set_value('requiv@secondary', 0.6)
# add datasets and compute the model
b.add_dataset('orb', times=times)
b.add_dataset('rv', times=times, datset='rv01')
b.add_dataset('mesh', times=[], include_times=['rv01'], columns=['rvs@rv01'])
b.run_compute(irrad_method='none')
autofig.reset()
for c in ['primary', 'secondary']:
xs = b.get_value(component=c, qualifier='us', context='model', kind='orb')
ys = b.get_value(component=c, qualifier='vs', context='model', kind='orb')
zs = b.get_value(component=c, qualifier='ws', context='model', kind='orb')
rvs = b.get_value(component=c, qualifier='rvs', context='model', kind='rv')
# plot the orbit with RV as the color
autofig.plot(xs, ys, zs, i=times,
xlabel='x', xunit='solRad',
ylabel='y', yunit='solRad',
c=rvs, cmap='bwr', clabel='rv', cunit='solRad/d',
s=0.03, highlight=False,
uncover=True, trail=0.3,
linestyle='solid', marker='none')
for t in times:
verts = b.get_value(time=t, component=c, qualifier='uvw_elements', context='model')
rvs = b.get_value(time=t, component=c, qualifier='rvs', context='model')
xs = verts[:, :, 0]
ys = verts[:, :, 1]
zs = verts[:, :, 2]
# plot the mesh at this time, with RV as facecolor
autofig.mesh(x=xs, y=ys, z=zs, i=t,
xlabel='x', xunit='solRad',
ylabel='y', yunit='solRad',
fc=rvs, fcmap='bwr', fclim='symmetric', fclabel='rv', fcunit='solRad/d',
ec='none')
mplfig = autofig.draw(i=times[5], save='phoebe_meshes_3d.png')
autofig.gcf().axes.pad_aspect = False
autofig.gcf().axes.projection = '3d'
autofig.gcf().axes.elev.value = [0, 30]
autofig.gcf().axes.azim.value = [-75, 0]
anim = autofig.animate(i=times, tight_layout=False,
save='phoebe_meshes_3d.gif', save_kwargs={'writer': 'imagemagick'})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's set the projection to '3d', set the range for the viewing angles and disable pad_aspect (as it doesn't play nicely with animations).
|
10,316
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
    """tqdm progress bar usable as a urllib.request.urlretrieve reporthook."""
    last_block = 0  # cumulative block count reported at the previous callback

    def hook(self, block_num=1, block_size=1, total_size=None):
        # urlretrieve reports a cumulative block_num, so advance the bar by
        # the delta (in bytes) since the last call.
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
print(int_words[:100])
print(len(int_words))
from collections import Counter
import random
def train_word(word_list, t):
    """Compute the Mikolov subsampling discard probability for each word.

    P(discard w) = 1 - sqrt(t / f(w)), where f(w) is the relative frequency
    of w in word_list and t is the subsampling threshold (e.g. 1e-5).
    Frequent words get a high discard probability; rare words a low (or
    negative, i.e. never-discard) one.

    Fixes three bugs in the original: it counted the module-level
    `int_words` instead of its argument, and referenced the undefined names
    `total_count` and `word_counts` (both NameErrors).

    Parameters
    ----------
    word_list : sequence of hashable word IDs.
    t : float, subsampling threshold.

    Returns
    -------
    dict mapping word ID -> probability of being dropped.
    """
    word_counts = Counter(word_list)
    total_count = len(word_list)
    drop_prob = {}
    for word, count in word_counts.items():
        frequency = count / total_count
        drop_prob[word] = 1 - np.sqrt(t / frequency)
    return drop_prob
## Your code here
word_set = train_word(int_words, 0.00005)
train_words = []
train_words = [word for word in int_words if random.random() < (1 - word_set[word])]
print(train_words[:10])
import random
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
# Your code here
rand = np.random.randint(1,window_size+1)
begin = idx - rand if (idx - rand) > 0 else 0
end = idx + rand
target = list(set(words[begin:idx] + words[idx+1:end+1]))
#print(target)
return target
def get_batches(words, batch_size, window_size=5):
    """Yield (inputs, targets) batches of skip-gram pairs.

    Each yielded pair is two parallel lists: every center word in `x`
    is repeated once per context word drawn for it in `y`.
    """
    n_batches = len(words) // batch_size
    words = words[:n_batches * batch_size]  # keep full batches only
    for start in range(0, len(words), batch_size):
        batch = words[start:start + batch_size]
        x, y = [], []
        for pos, center in enumerate(batch):
            context = get_target(batch, pos, window_size)
            y.extend(context)
            x.extend([center] * len(context))
        yield x, y
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, [None], name='inputs')
labels = tf.placeholder(tf.int32, [None, None], name='labels')
n_vocab = len(int_to_vocab)
n_embedding = 250# Number of embedding features
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs)
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(n_vocab))
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b,
labels, embed,
n_sampled, n_vocab)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to covert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
10,317
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
import itertools
import os
import time
import six
import numpy as np
import pandas as pd
import sklearn
from sklearn import model_selection
from sklearn import linear_model
from sklearn import metrics
import xgboost as xgb
# restart your notebook if prompted on Colab
try:
import verta
except ImportError:
!pip install verta
import os
# Ensure credentials are set up, if not, use below
# os.environ['VERTA_EMAIL'] =
# os.environ['VERTA_DEV_KEY'] =
# os.environ['VERTA_HOST'] =
from verta import Client
PROJECT_NAME = "Census"
EXPERIMENT_NAME = "sklearn + xgboost"
client = Client(os.environ['VERTA_HOST'])
proj = client.set_project(PROJECT_NAME)
expt = client.set_experiment(EXPERIMENT_NAME)
try:
import wget
except ImportError:
!pip install wget # you may need pip3
import wget
train_data_url = "http://s3.amazonaws.com/verta-starter/census-train.csv"
train_data_filename = wget.detect_filename(train_data_url)
if not os.path.isfile(train_data_filename):
wget.download(train_data_url)
test_data_url = "http://s3.amazonaws.com/verta-starter/census-test.csv"
test_data_filename = wget.detect_filename(test_data_url)
if not os.path.isfile(test_data_filename):
wget.download(test_data_url)
df_train = pd.read_csv(train_data_filename)
X_train_hpw = df_train.drop(columns=["hours-per-week", ">50k"]) # predict hours per week
y_train_hpw = df_train["hours-per-week"]
X_train_income = df_train.drop(columns=["hours-per-week", ">50k"])
y_train_income = df_train[">50k"]
X_train_income.head()
hyperparam_candidates = {
'C': [1e-6, 1e-4],
'solver': ['lbfgs'],
'max_iter': [15, 28],
}
hyperparam_sets = [dict(zip(hyperparam_candidates.keys(), values))
for values
in itertools.product(*hyperparam_candidates.values())]
def run_experiment(hyperparams, X_train, y_train):
    """Train a logistic regression with `hyperparams`, logging to a new Verta run.

    Logs the hyperparameters and the held-out validation accuracy.
    """
    # create object to track experiment run
    run = client.set_experiment_run()

    # create validation split (20% held out)
    (X_val_train, X_val_test,
     y_val_train, y_val_test) = model_selection.train_test_split(X_train, y_train,
                                                                 test_size=0.2,
                                                                 shuffle=True)

    # log hyperparameters
    run.log_hyperparameters(hyperparams)
    print(hyperparams, end=' ')

    # create and train model on the *training* split only.
    # (The original fit on all of X_train, leaking the held-out rows into
    # training and inflating val_acc.)
    model = linear_model.LogisticRegression(**hyperparams)
    model.fit(X_val_train, y_val_train)

    # calculate and log validation accuracy
    val_acc = model.score(X_val_test, y_val_test)
    run.log_metric("val_acc", val_acc)
    print("Validation accuracy: {:.4f}".format(val_acc))
# NOTE: run_experiment() could also be defined in a module, and executed in parallel
for hyperparams in hyperparam_sets:
run_experiment(hyperparams, X_train_hpw, y_train_hpw)
best_run = expt.expt_runs.sort("metrics.val_acc", descending=True)[0]
print("Validation Accuracy: {:.4f}".format(best_run.get_metric("val_acc")))
best_hyperparams = best_run.get_hyperparameters()
print("Hyperparameters: {}".format(best_hyperparams))
model = linear_model.LogisticRegression(multi_class='auto', **best_hyperparams)
model.fit(X_train_hpw, y_train_hpw)
train_acc = model.score(X_train_hpw, y_train_hpw)
predicted_hpw = model.predict(X_train_hpw)
print("Training accuracy: {:.4f}".format(train_acc))
pd.DataFrame(predicted_hpw, index=X_train_hpw.index)
X_train_income_prediction = pd.concat([X_train_income,
pd.DataFrame(
predicted_hpw, columns=["predicted_hpw"],
index=X_train_income.index)], axis=1)
dtrain = xgb.DMatrix(X_train_income_prediction, label=y_train_income)
grid = model_selection.ParameterGrid({
'eta': [0.5, 0.7],
'max_depth': [1, 2, 3],
'num_class': [10],
})
EXPERIMENT_NAME = "XGBoost"
client.set_experiment(EXPERIMENT_NAME)
def run_experiment(hyperparams, X_train, y_train):
    """Cross-validate XGBoost with `hyperparams`, logging to a new Verta run.

    Logs per-iteration CV observations and the final mean validation error.
    """
    run = client.set_experiment_run()

    # log hyperparameters
    run.log_hyperparameters(hyperparams)

    # Build the DMatrix from the arguments.
    # (The original cross-validated the module-level global `dtrain`,
    # silently ignoring the X_train/y_train parameters.)
    dmatrix = xgb.DMatrix(X_train, label=y_train)

    # run cross validation on hyperparameters
    cv_history = xgb.cv(hyperparams, dmatrix,
                        nfold=5,
                        metrics=("merror", "mlogloss"))

    # log observations from each iteration
    for _, iteration in cv_history.iterrows():
        for obs, val in iteration.iteritems():
            run.log_observation(obs, val)

    # log error from final iteration
    final_val_error = iteration['test-merror-mean']
    run.log_metric("val_error", final_val_error)
    print("{} Mean error: {:.4f}".format(hyperparams, final_val_error))
# NOTE: run_experiment() could also be defined in a module, and executed in parallel
for hyperparams in grid:
run_experiment(
hyperparams, X_train_income_prediction.to_numpy(), y_train_income.to_numpy())
income_model = xgb.XGBClassifier(**best_hyperparams)
income_model.fit(X_train_income_prediction.to_numpy(), y_train_income.to_numpy())
registered_model = client.get_or_create_registered_model(
name="census", labels=["xgboost", "sklearn"])
hpw_model = model
from verta.registry import VertaModelBase
class CensusTwoStep(VertaModelBase):
    """Two-stage census model: predict hours-per-week, then income.

    Stage 1 predicts hours-per-week from the raw features; stage 2
    appends that prediction as an extra column and predicts income.
    """

    def __init__(self, artifacts):
        import cloudpickle
        with open(artifacts["hpw_model"], "rb") as f:
            self.hpw_model = cloudpickle.load(f)
        with open(artifacts["income_model"], "rb") as f:
            self.income_model = cloudpickle.load(f)

    def predict(self, batch_input):
        """Run both stages over each element of `batch_input`."""
        import numpy as np
        predictions = []
        for features in batch_input:
            hpw = self.hpw_model.predict(features)
            augmented = np.concatenate(
                (np.array(features), np.reshape(hpw, (-1, 1))), axis=1)
            predictions.append(self.income_model.predict(augmented))
        return predictions
import cloudpickle
cloudpickle.dump(income_model, open("income_model.pkl", "wb"))
cloudpickle.dump(hpw_model, open("hpw_model.pkl", "wb"))
my_model = CensusTwoStep(
{
"hpw_model" : "hpw_model.pkl",
"income_model" : "income_model.pkl"
})
my_model.predict([X_train_hpw.values.tolist()[:5]])
from verta.environment import Python
model_version = registered_model.create_standard_model(
model_cls=CensusTwoStep,
environment=Python(requirements=["sklearn", "xgboost"]),
artifacts={
"hpw_model" : hpw_model,
"income_model" : income_model
},
name="v6"
)
model_version
census_multiple_endpoint = client.get_or_create_endpoint("census-multiple")
census_multiple_endpoint.update(model_version, wait=True)
deployed_model = census_multiple_endpoint.get_deployed_model()
deployed_model.predict([X_train_hpw.values.tolist()[:5]])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0.1 Verta import and setup
Step2: 1. Model training
Step3: 1.2 Define hyperparams
Step4: 1.3 Train/test code
Step5: 2. Register Model for deployment
Step6: 3. Deploy model to endpoint
|
10,318
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy.random as nr
import numpy as np
from matplotlib import pyplot as plt
the_mean=75
the_sigma=20.
numpoints=1000
outRandom=nr.normal(the_mean,the_sigma,[numpoints,])
outRandom=outRandom[outRandom <= 100.]
outRandom=outRandom[outRandom >= 0.]
#
# histogram these 1000 grades into 1000 bins with uniform width
#
fig,ax = plt.subplots(1,1)
bin_edges=np.linspace(0,100,21,endpoint=True)
ax.hist(outRandom,bins=bin_edges)
_=ax.set(title='Grade distribution',xlabel='mark (%)',ylabel='Number in bin')
#
# make a dictionary to hold the grade boundaries
#
bounds={'a+':90,'a':85,'a-':80,'b+':76,'b':72,'b-':68,'c+':64,'c':60,'c-':55,'d':50}
bounds=list(bounds.values())
bounds.sort()
#
# add the high and low edges
#
bounds.insert(0,0)
bounds.append(100)
fig,ax = plt.subplots(1,1)
out=ax.hist(outRandom,bins=bounds)
_=ax.set(title='Grade distribution UBC',xlabel='mark (%)',ylabel='Number in bin')
fig,ax = plt.subplots(1,1)
out=ax.hist(outRandom,bins=bounds,normed=True)
fig,ax = plt.subplots(1,1)
counts,edges=np.histogram(outRandom,bins=bounds)
widths=np.diff(edges)
counts_dens=counts/widths
left_edge = edges[:-1]
out=ax.bar(left_edge,counts_dens,width=widths)
_=ax.set(title='Grade density histogram UBC',xlabel='mark (%)',ylabel='Number in bin/(%)')
from a301utils.a301_readfile import download
download('photon_data.csv')
bin_wavelengths = np.loadtxt('photon_data.csv')
bin_wavelengths[:10]
total_counts = len(bin_wavelengths)
total_flux = total_counts*0.01
total_flux
fig,ax = plt.subplots(1,1)
#
# 51 edges from 0.1 to 60 microns
#
edges = np.linspace(0.1,60,51)
counts,edges=np.histogram(bin_wavelengths,bins=edges)
widths=np.diff(edges)
counts_dens=counts/widths/total_counts*total_flux
left_edge = edges[:-1]
out=ax.bar(left_edge,counts_dens,width=widths)
_=ax.set(title='Flux density ($W/m^2/\mu m$)',xlabel='wavelength ($\mu m$)',ylabel='$E_\lambda\ (W/m^2/\mu m)$')
#
# put the Planck curve on top of this
#
from a301lib.radiation import planckwavelen
Temp=300 #Kelvin
Elambda = planckwavelen(edges*1.e-6,Temp)*1.e-6 #convert from W/m^2/m to W/m^2/micron
ax.plot(edges,Elambda,linewidth=4,label='Planck curve')
_=ax.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Remove all grades below 0 or above 100
Step2: Note that UBC has grade boundaries that narrow for higher marks. Just counting the number in each bin and plotting it is seriously misleading if you expect the area of the bin to be proportional to the number in the bin.
Step3: matplotlib accepts a "normed=True" flag that divides by the total number and the bin width, so that the histogram area integrates to 1.
Step4: If we want to put numbers/(bin width) on the y axis, we need to first do the histogram then divide the counts in each bin by the bin width before plotting
Step5: An idealized satellite sensor
Step6: 2) read the file in using np.loadtxt
Step7: The total $W/m^2$ is just the number measurements multiplied by 0.01 Joules for each measurement
Step8: Now histogram the photon counts
|
10,319
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import pylab as plt
# Set the global default size of matplotlib figures
plt.rc('figure', figsize=(10, 5))
# Size of matplotlib figures that contain subplots
fizsize_with_subplots = (10, 10)
# Size of matplotlib histogram bins
bin_size = 10
df_train = pd.read_csv('../data/titanic/train.csv')
df_train.head()
df_train.tail()
df_train.dtypes
df_train.info()
df_train.describe()
# Set up a grid of plots
fig = plt.figure(figsize=fizsize_with_subplots)
fig_dims = (3, 2)
# Plot death and survival counts
plt.subplot2grid(fig_dims, (0, 0))
df_train['Survived'].value_counts().plot(kind='bar',
title='Death and Survival Counts')
# Plot Pclass counts
plt.subplot2grid(fig_dims, (0, 1))
df_train['Pclass'].value_counts().plot(kind='bar',
title='Passenger Class Counts')
# Plot Sex counts
plt.subplot2grid(fig_dims, (1, 0))
df_train['Sex'].value_counts().plot(kind='bar',
title='Gender Counts')
plt.xticks(rotation=0)
# Plot Embarked counts
plt.subplot2grid(fig_dims, (1, 1))
df_train['Embarked'].value_counts().plot(kind='bar',
title='Ports of Embarkation Counts')
# Plot the Age histogram
plt.subplot2grid(fig_dims, (2, 0))
df_train['Age'].hist()
plt.title('Age Histogram')
pclass_xt = pd.crosstab(df_train['Pclass'], df_train['Survived'])
pclass_xt
# Normalize the cross tab to sum to 1:
pclass_xt_pct = pclass_xt.div(pclass_xt.sum(1).astype(float), axis=0)
pclass_xt_pct.plot(kind='bar',
stacked=True,
title='Survival Rate by Passenger Classes')
plt.xlabel('Passenger Class')
plt.ylabel('Survival Rate')
sexes = sorted(df_train['Sex'].unique())
genders_mapping = dict(zip(sexes, range(0, len(sexes) + 1)))
genders_mapping
df_train['Sex_Val'] = df_train['Sex'].map(genders_mapping).astype(int)
df_train.head()
sex_val_xt = pd.crosstab(df_train['Sex_Val'], df_train['Survived'])
sex_val_xt_pct = sex_val_xt.div(sex_val_xt.sum(1).astype(float), axis=0)
sex_val_xt_pct.plot(kind='bar', stacked=True, title='Survival Rate by Gender')
# Get the unique values of Pclass:
passenger_classes = sorted(df_train['Pclass'].unique())
for p_class in passenger_classes:
print 'M: ', p_class, len(df_train[(df_train['Sex'] == 'male') &
(df_train['Pclass'] == p_class)])
print 'F: ', p_class, len(df_train[(df_train['Sex'] == 'female') &
(df_train['Pclass'] == p_class)])
# Plot survival rate by Sex
females_df = df_train[df_train['Sex'] == 'female']
females_xt = pd.crosstab(females_df['Pclass'], df_train['Survived'])
females_xt_pct = females_xt.div(females_xt.sum(1).astype(float), axis=0)
females_xt_pct.plot(kind='bar',
stacked=True,
title='Female Survival Rate by Passenger Class')
plt.xlabel('Passenger Class')
plt.ylabel('Survival Rate')
# Plot survival rate by Pclass
males_df = df_train[df_train['Sex'] == 'male']
males_xt = pd.crosstab(males_df['Pclass'], df_train['Survived'])
males_xt_pct = males_xt.div(males_xt.sum(1).astype(float), axis=0)
males_xt_pct.plot(kind='bar',
stacked=True,
title='Male Survival Rate by Passenger Class')
plt.xlabel('Passenger Class')
plt.ylabel('Survival Rate')
df_train[df_train['Embarked'].isnull()]
# Get the unique values of Embarked
embarked_locs = sorted(df_train['Embarked'].unique())
embarked_locs_mapping = dict(zip(embarked_locs,
range(0, len(embarked_locs) + 1)))
embarked_locs_mapping
df_train['Embarked_Val'] = df_train['Embarked'] \
.map(embarked_locs_mapping) \
.astype(int)
df_train.head()
df_train['Embarked_Val'].hist(bins=len(embarked_locs), range=(0, 3))
plt.title('Port of Embarkation Histogram')
plt.xlabel('Port of Embarkation')
plt.ylabel('Count')
plt.show()
# Assign the modal port 'S' to the rows whose Embarked was missing.
# NOTE: the original tested len(df > 0) (always truthy for a non-empty
# frame) and referenced the undefined name `nan`; use a proper emptiness
# check and np.nan instead.
if df_train['Embarked'].isnull().sum() > 0:
    df_train.replace({'Embarked_Val':
                      {embarked_locs_mapping[np.nan]: embarked_locs_mapping['S']}},
                     inplace=True)
embarked_locs = sorted(df_train['Embarked_Val'].unique())
embarked_locs
embarked_val_xt = pd.crosstab(df_train['Embarked_Val'], df_train['Survived'])
embarked_val_xt_pct = \
embarked_val_xt.div(embarked_val_xt.sum(1).astype(float), axis=0)
embarked_val_xt_pct.plot(kind='bar', stacked=True)
plt.title('Survival Rate by Port of Embarkation')
plt.xlabel('Port of Embarkation')
plt.ylabel('Survival Rate')
# Set up a grid of plots
fig = plt.figure(figsize=fizsize_with_subplots)
rows = 2
cols = 3
col_names = ('Sex_Val', 'Pclass')
for portIdx in embarked_locs:
for colIdx in range(0, len(col_names)):
plt.subplot2grid((rows, cols), (colIdx, portIdx - 1))
df_train[df_train['Embarked_Val'] == portIdx][col_names[colIdx]] \
.value_counts().plot(kind='bar')
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked_Val'], prefix='Embarked_Val')], axis=1)
df_train[df_train['Age'].isnull()][['Sex', 'Pclass', 'Age']].head()
# To keep Age in tact, make a copy of it called AgeFill
# that we will use to fill in the missing ages:
df_train['AgeFill'] = df_train['Age']
# Populate AgeFill
df_train['AgeFill'] = df_train['AgeFill'] \
.groupby([df_train['Sex_Val'], df_train['Pclass']]) \
.apply(lambda x: x.fillna(x.median()))
len(df_train[df_train['AgeFill'].isnull()])
# Set up a grid of plots
fig, axes = plt.subplots(2, 1, figsize=fizsize_with_subplots)
# Histogram of AgeFill segmented by Survived
df1 = df_train[df_train['Survived'] == 0]['Age']
df2 = df_train[df_train['Survived'] == 1]['Age']
max_age = max(df_train['AgeFill'])
axes[0].hist([df1, df2],
bins=max_age / bin_size,
range=(1, max_age),
stacked=True)
axes[0].legend(('Died', 'Survived'), loc='best')
axes[0].set_title('Survivors by Age Groups Histogram')
axes[0].set_xlabel('Age')
axes[0].set_ylabel('Count')
# Scatter plot Survived and AgeFill
axes[1].scatter(df_train['Survived'], df_train['AgeFill'])
axes[1].set_title('Survivors by Age Plot')
axes[1].set_xlabel('Survived')
axes[1].set_ylabel('Age')
for pclass in passenger_classes:
df_train.AgeFill[df_train.Pclass == pclass].plot(kind='kde')
plt.title('Age Density Plot by Passenger Class')
plt.xlabel('Age')
plt.legend(('1st Class', '2nd Class', '3rd Class'), loc='best')
# Set up a grid of plots
fig = plt.figure(figsize=fizsize_with_subplots)
fig_dims = (3, 1)
# Plot the AgeFill histogram for Survivors
plt.subplot2grid(fig_dims, (0, 0))
survived_df = df_train[df_train['Survived'] == 1]
survived_df['AgeFill'].hist(bins=max_age / bin_size, range=(1, max_age))
# Plot the AgeFill histogram for Females
plt.subplot2grid(fig_dims, (1, 0))
females_df = df_train[(df_train['Sex_Val'] == 0) & (df_train['Survived'] == 1)]
females_df['AgeFill'].hist(bins=max_age / bin_size, range=(1, max_age))
# Plot the AgeFill histogram for first class passengers
plt.subplot2grid(fig_dims, (2, 0))
class1_df = df_train[(df_train['Pclass'] == 1) & (df_train['Survived'] == 1)]
class1_df['AgeFill'].hist(bins=max_age / bin_size, range=(1, max_age))
df_train['FamilySize'] = df_train['SibSp'] + df_train['Parch']
df_train.head()
df_train['FamilySize'].hist()
plt.title('Family Size Histogram')
# Get the unique values of Embarked and its maximum
family_sizes = sorted(df_train['FamilySize'].unique())
family_size_max = max(family_sizes)
df1 = df_train[df_train['Survived'] == 0]['FamilySize']
df2 = df_train[df_train['Survived'] == 1]['FamilySize']
plt.hist([df1, df2],
bins=family_size_max + 1,
range=(0, family_size_max),
stacked=True)
plt.legend(('Died', 'Survived'), loc='best')
plt.title('Survivors by Family Size')
df_train.dtypes[df_train.dtypes.map(lambda x: x == 'object')]
df_train = df_train.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'],
axis=1)
df_train = df_train.drop(['Age', 'SibSp', 'Parch', 'PassengerId', 'Embarked_Val'], axis=1)
df_train.dtypes
train_data = df_train.values
train_data
def clean_data(df, drop_passenger_id):
    """Feature-engineer a raw Titanic frame into numeric model inputs.

    Maps Sex to Sex_Val, one-hot encodes Embarked (missing ports are
    assigned the modal port 'S'), fills missing Fare with the mean fare,
    fills missing Age into AgeFill using the (Sex_Val, Pclass) group
    median, adds FamilySize, and drops the raw/unused columns.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw Titanic data with the Kaggle column schema.
    drop_passenger_id : bool
        If True, also drop the PassengerId column.

    Returns
    -------
    pandas.DataFrame with numeric feature columns only.
    """
    # Sex -> integer code (sorted order: female=0, male=1)
    sexes = sorted(df['Sex'].unique())
    genders_mapping = dict(zip(sexes, range(0, len(sexes) + 1)))
    df['Sex_Val'] = df['Sex'].map(genders_mapping).astype(int)

    # Fill missing ports with the modal port 'S' *before* dummy encoding,
    # so no Embarked_Val_nan column is produced.
    # (The original referenced the undefined name `nan` and tried to fix up
    # an `Embarked_Val` column that was never created, raising NameError.)
    if df['Embarked'].isnull().any():
        df['Embarked'] = df['Embarked'].fillna('S')

    # Transform Embarked from a string to dummy variables
    df = pd.concat([df, pd.get_dummies(df['Embarked'], prefix='Embarked_Val')],
                   axis=1)

    # Fill in missing values of Fare with the average Fare.
    # (The original called df.replace({None: avg_fare}), which rewrites
    # NaNs frame-wide instead of targeting Fare.)
    if df['Fare'].isnull().any():
        df['Fare'] = df['Fare'].fillna(df['Fare'].mean())

    # To keep Age intact, fill missing ages into a copy called AgeFill,
    # using the median age of the passenger's (Sex_Val, Pclass) group
    # (median because the Age histogram is right-skewed). transform()
    # guarantees the result aligns with the original index.
    df['AgeFill'] = df['Age']
    df['AgeFill'] = df.groupby(['Sex_Val', 'Pclass'])['AgeFill'] \
        .transform(lambda ages: ages.fillna(ages.median()))

    # FamilySize = parents/children + siblings/spouses aboard
    df['FamilySize'] = df['SibSp'] + df['Parch']

    # Drop the raw string columns and the columns superseded by the
    # engineered features above (AgeFill replaces Age, FamilySize
    # replaces SibSp/Parch).
    df = df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
    df = df.drop(['Age', 'SibSp', 'Parch'], axis=1)
    if drop_passenger_id:
        df = df.drop(['PassengerId'], axis=1)

    return df
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
# Training data features, skip the first column 'Survived'
train_features = train_data[:, 1:]
# 'Survived' column values
train_target = train_data[:, 0]
# Fit the model to our training data
clf = clf.fit(train_features, train_target)
score = clf.score(train_features, train_target)
"Mean accuracy of Random Forest: {0}".format(score)
df_test = pd.read_csv('../data/titanic/test.csv')
df_test.head()
# Data wrangle the test set and convert it to a numpy array
df_test = clean_data(df_test, drop_passenger_id=False)
test_data = df_test.values
# Get the test data features, skipping the first column 'PassengerId'
test_x = test_data[:, 1:]
# Predict the Survival values for the test data
test_y = clf.predict(test_x)
df_test['Survived'] = test_y
df_test[['PassengerId', 'Survived']] \
.to_csv('../data/titanic/results-rf.csv', index=False)
from sklearn import metrics
from sklearn.cross_validation import train_test_split
# Split 80-20 train vs test data
train_x, test_x, train_y, test_y = train_test_split(train_features,
train_target,
test_size=0.20,
random_state=0)
print (train_features.shape, train_target.shape)
print (train_x.shape, train_y.shape)
print (test_x.shape, test_y.shape)
clf = clf.fit(train_x, train_y)
predict_y = clf.predict(test_x)
from sklearn.metrics import accuracy_score
print ("Accuracy = %.2f" % (accuracy_score(test_y, predict_y)))
from IPython.core.display import Image
Image(filename='../data/confusion_matrix.png', width=800)
model_score = clf.score(test_x, test_y)
print ("Model Score %.2f \n" % (model_score))
confusion_matrix = metrics.confusion_matrix(test_y, predict_y)
print ("Confusion Matrix ", confusion_matrix)
print (" Predicted")
print (" | 0 | 1 |")
print (" |-----|-----|")
print (" 0 | %3d | %3d |" % (confusion_matrix[0, 0],
confusion_matrix[0, 1]))
print ("Actual |-----|-----|")
print (" 1 | %3d | %3d |" % (confusion_matrix[1, 0],
confusion_matrix[1, 1]))
print (" |-----|-----|")
from sklearn.metrics import classification_report
print(classification_report(test_y,
predict_y,
target_names=['Not Survived', 'Survived']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Explore the Data
Step2: View the data types of each column
Step3: Type 'object' is a string for pandas, which poses problems with machine learning algorithms. If we want to use these as features, we'll need to convert these to number representations.
Step4: Age, Cabin, and Embarked are missing values. Cabin has too many missing values, whereas we might be able to infer values for Age and Embarked.
Step5: Now that we have a general idea of the data set contents, we can dive deeper into each column. We'll be doing exploratory data analysis and cleaning data to setup 'features' we'll be using in our machine learning algorithms.
Step6: Next we'll explore various features to view their impact on survival rates.
Step7: Plot the cross tab
Step8: We can see that passenger class seems to have a significant impact on whether a passenger survived. Those in First Class the highest chance for survival.
Step9: Transform Sex from a string to a number representation
Step10: Plot a normalized cross tab for Sex_Val and Survived
Step11: The majority of females survived, whereas the majority of males did not.
Step12: Plot survival rate by Sex and Pclass
Step13: The vast majority of females in First and Second class survived. Males in First class had the highest chance for survival.
Step14: Prepare to map Embarked from a string to a number representation
Step15: Transform Embarked from a string to a number representation to prepare it for machine learning algorithms
Step16: Plot the histogram for Embarked_Val
Step17: Since the vast majority of passengers embarked in 'S'
Step18: Verify we do not have any more NaNs for Embarked_Val
Step19: Plot a normalized cross tab for Embarked_Val and Survived
Step20: It appears those that embarked in location 'C'
Step21: Leaving Embarked as integers implies ordering in the values, which does not exist. Another way to represent Embarked without ordering is to create dummy variables
Step22: Feature
Step23: Determine the Age typical for each passenger class by Sex_Val. We'll use the median instead of the mean because the Age histogram seems to be right skewed.
Step24: Ensure AgeFill does not contain any missing values
Step25: Plot a normalized cross tab for AgeFill and Survived
Step26: Unfortunately, the graphs above do not seem to clearly show any insights. We'll keep digging further.
Step27: When looking at AgeFill density by Pclass, we see the first class passengers were generally older then second class passengers, which in turn were older than third class passengers. We've determined that first class passengers had a higher survival rate than second class passengers, which in turn had a higher survival rate than third class passengers.
Step28: In the first graph, we see that most survivors come from the 20's to 30's age ranges and might be explained by the following two graphs. The second graph shows most females are within their 20's. The third graph shows most first class passengers are within their 30's.
Step29: Plot a histogram of FamilySize
Step30: Plot a histogram of AgeFill segmented by Survived
Step31: Based on the histograms, it is not immediately obvious what impact FamilySize has on survival. The machine learning algorithms might benefit from this feature.
Step32: Drop the columns we won't use
Step33: Drop the following columns
Step34: Convert the DataFrame to a numpy array
Step35: Data Wrangling Summary
Step36: Random Forest
Step37: Fit the training data and create the decision trees
Step38: Random Forest
Step39: Note the test data does not contain the column 'Survived', we'll use our trained model to predict these values.
Step40: Take the decision trees and run it on the test data
Step41: Random Forest
Step42: Evaluate Model Accuracy
Step43: Use the new training data to fit the model, predict, and get the accuracy score
Step44: View the Confusion Matrix
Step45: Get the model score and confusion matrix
Step46: Display the classification report
|
10,320
|
<ASSISTANT_TASK:>
Python Code:
prices = {
'ACME': 45.23,
'AAPL': 612.78,
'IBM': 205.55,
'HPQ': 37.20,
'FB': 10.75
}
# Make a dictionary of all prices over 200
p1 = {key: value for key, value in prices.items() if value > 200}
p1
# Make a dictionary of tech stocks
tech_names = {'AAPL', 'IBM', 'HPQ', 'MSFT'}
p2 = {key: value for key, value in prices.items() if key in tech_names}
p2
p1 = dict((key, value) for key, value in prices.items() if value > 200)
p1
# Make a dictionary of tech stocks
tech_names = {'AAPL', 'IBM', 'HPQ', 'MSFT'}
p2 = {key:prices[key] for key in prices.keys() & tech_names}
p2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 讨论
Step2: 但是,字典推导方式表意更清晰,并且实际上也会运行得更快些 (在这个例子中,实际测试几乎比 dict() 函数方式快整整一倍)。
|
10,321
|
<ASSISTANT_TASK:>
Python Code:
#Set up the model and vector that we are using in the comparison
from gensim.similarities.index import AnnoyIndexer
from gensim.models.word2vec import Word2Vec
model = Word2Vec.load("/tmp/leemodel")
model.init_sims()
vector = model.syn0norm[0]
annoy_index = AnnoyIndexer(model, 500)
%%time
#Traditional implementation:
model.most_similar([vector], topn=5)
%%time
#Annoy implementation:
neighbors = model.most_similar([vector], topn=5, indexer=annoy_index)
for neighbor in neighbors:
print neighbor
# import modules & set up logging
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Load the model
import gensim
model = gensim.models.Word2Vec.load('/tmp/leemodel')
print model
from gensim.similarities.index import AnnoyIndexer
# 100 trees are being used in this example
annoy_index = AnnoyIndexer(model,100)
# Derive the vector for the word "army" in our model
vector = model["army"]
# The instance of AnnoyIndexer we just created is passed
approximate_neighbors = model.most_similar([vector], topn=5, indexer=annoy_index)
# Neatly print the approximate_neighbors and their corresponding cosine similarity values
for neighbor in approximate_neighbors:
print neighbor
%matplotlib inline
import matplotlib.pyplot as plt, time
x_cor = []
y_cor = []
for x in range(200):
start_time = time.time()
AnnoyIndexer(model, x)
y_cor.append(time.time()-start_time)
x_cor.append(x)
plt.plot(x_cor, y_cor)
plt.title("num_trees vs initalization time")
plt.ylabel("Initialization time (s)")
plt.xlabel("num_tress")
plt.show()
exact_results = [element[0] for element in model.most_similar([model.syn0norm[0]], topn=100)]
x_axis = []
y_axis = []
for x in range(1,30):
annoy_index = AnnoyIndexer(model, x)
approximate_results = model.most_similar([model.syn0norm[0]],topn=100, indexer=annoy_index)
top_words = [result[0] for result in approximate_results]
x_axis.append(x)
y_axis.append(len(set(top_words).intersection(exact_results)))
plt.plot(x_axis, y_axis)
plt.title("num_trees vs accuracy")
plt.ylabel("% accuracy")
plt.xlabel("num_trees")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A similarity query using Annoy is significantly faster than using the traditional brute force method
Step2: Making a Similarity Query
Step3: Creating an indexer
Step4: Now that we are ready to make a query, lets find the top 5 most similar words to "army" in the lee corpus. To make a similarity query we call Word2Vec.most_similar like we would traditionally, but with an added parameter, indexer. The only supported indexer in gensim as of now is Annoy.
Step5: Analyzing the results
Step6: Initialization time of the annoy indexer increases in a linear fashion with num_trees. Initialization time will vary from corpus to corpus, in the graph above the lee corpus was used
|
10,322
|
<ASSISTANT_TASK:>
Python Code:
import logging
from conf import LisaLogging
LisaLogging.setup()
# Generate plots inline
%matplotlib inline
import json
import os
# Support to access the remote target
import devlib
from env import TestEnv
from executor import Executor
# RTApp configurator for generation of PERIODIC tasks
from wlgen import RTA, Ramp
# Support for trace events analysis
from trace import Trace
# Setup target configuration
my_conf = {
# Target platform and board
"platform" : 'linux',
"board" : 'juno',
"host" : '192.168.0.1',
"password" : 'juno',
# Folder where all the results will be collected
"results_dir" : "TraceAnalysis_FunctionsProfiling",
# Define devlib modules to load
"modules": ['cpufreq'],
"exclude_modules" : [ 'hwmon' ],
# FTrace events to collect for all the tests configuration which have
# the "ftrace" flag enabled
"ftrace" : {
"functions" : [
"pick_next_task_fair",
"select_task_rq_fair",
"enqueue_task_fair",
"update_curr_fair",
"dequeue_task_fair",
],
"buffsize" : 100 * 1024,
},
# Tools required by the experiments
"tools" : [ 'trace-cmd', 'rt-app' ],
# Comment this line to calibrate RTApp in your own platform
# "rtapp-calib" : {"0": 360, "1": 142, "2": 138, "3": 352, "4": 352, "5": 353},
}
# Initialize a test environment using:
te = TestEnv(my_conf, wipe=False, force_new=True)
target = te.target
def experiment(te):
# Create and RTApp RAMP task
rtapp = RTA(te.target, 'ramp', calibration=te.calibration())
rtapp.conf(kind='profile',
params={
'ramp' : Ramp(
start_pct = 60,
end_pct = 20,
delta_pct = 5,
time_s = 0.5).get()
})
# FTrace the execution of this workload
te.ftrace.start()
rtapp.run(out_dir=te.res_dir)
te.ftrace.stop()
# Collect and keep track of the trace
trace_file = os.path.join(te.res_dir, 'trace.dat')
te.ftrace.get_trace(trace_file)
# Collect and keep track of the Kernel Functions performance data
stats_file = os.path.join(te.res_dir, 'trace.stats')
te.ftrace.get_stats(stats_file)
# Dump platform descriptor
te.platform_dump(te.res_dir)
experiment(te)
# Base folder where tests folder are located
res_dir = te.res_dir
logging.info('Content of the output folder %s', res_dir)
!tree {res_dir}
with open(os.path.join(res_dir, 'platform.json'), 'r') as fh:
platform = json.load(fh)
print json.dumps(platform, indent=4)
logging.info('LITTLE cluster max capacity: %d',
platform['nrg_model']['little']['cpu']['cap_max'])
trace = Trace(res_dir, platform=platform)
# Get the DataFrame for the specified list of kernel functions
df = trace.data_frame.functions_stats(['enqueue_task_fair', 'dequeue_task_fair'])
df
# Get the DataFrame for the single specified kernel function
df = trace.data_frame.functions_stats('select_task_rq_fair')
df
# Plot Average and Total execution time for the specified
# list of kernel functions
trace.analysis.functions.plotProfilingStats(
functions = [
'select_task_rq_fair',
'enqueue_task_fair',
'dequeue_task_fair'
],
metrics = [
# Average completion time per CPU
'avg',
# Total execution time per CPU
'time',
]
)
# Plot Average execution time for the single specified kernel function
trace.analysis.functions.plotProfilingStats(
functions = 'update_curr_fair',
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import required modules
Step2: Target Configuration
Step3: Workload Execution and Functions Profiling Data Collection
Step4: Parse Trace and Profiling Data
Step5: Report Functions Profiling Data
Step6: Plot Functions Profiling Data
|
10,323
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow_datasets as tfds
import tensorflow as tf
tfds.disable_progress_bar()
import matplotlib.pyplot as plt
def plot_graphs(history, metric):
plt.plot(history.history[metric])
plt.plot(history.history['val_'+metric], '')
plt.xlabel("Epochs")
plt.ylabel(metric)
plt.legend([metric, 'val_'+metric])
dataset, info = tfds.load('imdb_reviews', with_info=True,
as_supervised=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
train_dataset.element_spec
for example, label in train_dataset.take(1):
print('text: ', example.numpy())
print('label: ', label.numpy())
BUFFER_SIZE = 10000
BATCH_SIZE = 64
train_dataset = train_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
test_dataset = test_dataset.batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
for example, label in train_dataset.take(1):
print('texts: ', example.numpy()[:3])
print()
print('labels: ', label.numpy()[:3])
VOCAB_SIZE = 1000
encoder = tf.keras.layers.TextVectorization(
max_tokens=VOCAB_SIZE)
encoder.adapt(train_dataset.map(lambda text, label: text))
vocab = np.array(encoder.get_vocabulary())
vocab[:20]
encoded_example = encoder(example)[:3].numpy()
encoded_example
for n in range(3):
print("Original: ", example[n].numpy())
print("Round-trip: ", " ".join(vocab[encoded_example[n]]))
print()
model = tf.keras.Sequential([
encoder,
tf.keras.layers.Embedding(
input_dim=len(encoder.get_vocabulary()),
output_dim=64,
# Use masking to handle the variable sequence lengths
mask_zero=True),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1)
])
print([layer.supports_masking for layer in model.layers])
# predict on a sample text without padding.
sample_text = ('The movie was cool. The animation and the graphics '
'were out of this world. I would recommend this movie.')
predictions = model.predict(np.array([sample_text]))
print(predictions[0])
# predict on a sample text with padding
padding = "the " * 2000
predictions = model.predict(np.array([sample_text, padding]))
print(predictions[0])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
history = model.fit(train_dataset, epochs=10,
validation_data=test_dataset,
validation_steps=30)
test_loss, test_acc = model.evaluate(test_dataset)
print('Test Loss:', test_loss)
print('Test Accuracy:', test_acc)
plt.figure(figsize=(16, 8))
plt.subplot(1, 2, 1)
plot_graphs(history, 'accuracy')
plt.ylim(None, 1)
plt.subplot(1, 2, 2)
plot_graphs(history, 'loss')
plt.ylim(0, None)
sample_text = ('The movie was cool. The animation and the graphics '
'were out of this world. I would recommend this movie.')
predictions = model.predict(np.array([sample_text]))
model = tf.keras.Sequential([
encoder,
tf.keras.layers.Embedding(len(encoder.get_vocabulary()), 64, mask_zero=True),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(1)
])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
history = model.fit(train_dataset, epochs=10,
validation_data=test_dataset,
validation_steps=30)
test_loss, test_acc = model.evaluate(test_dataset)
print('Test Loss:', test_loss)
print('Test Accuracy:', test_acc)
# predict on a sample text without padding.
sample_text = ('The movie was not good. The animation and the graphics '
'were terrible. I would not recommend this movie.')
predictions = model.predict(np.array([sample_text]))
print(predictions)
plt.figure(figsize=(16, 6))
plt.subplot(1, 2, 1)
plot_graphs(history, 'accuracy')
plt.subplot(1, 2, 2)
plot_graphs(history, 'loss')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Text classification with an RNN
Step2: Import matplotlib and create a helper function to plot graphs
Step3: Setup input pipeline
Step4: Initially this returns a dataset of (text, label pairs)
Step5: Next shuffle the data for training and create batches of these (text, label) pairs
Step6: Create the text encoder
Step7: The .adapt method sets the layer's vocabulary. Here are the first 20 tokens. After the padding and unknown tokens they're sorted by frequency
Step8: Once the vocabulary is set, the layer can encode text into indices. The tensors of indices are 0-padded to the longest sequence in the batch (unless you set a fixed output_sequence_length)
Step9: With the default settings, the process is not completely reversible. There are three main reasons for that
Step10: Create the model
Step11: Please note that Keras sequential model is used here since all the layers in the model only have single input and produce single output. In case you want to use stateful RNN layer, you might want to build your model with Keras functional API or model subclassing so that you can retrieve and reuse the RNN layer states. Please check Keras RNN guide for more details.
Step12: To confirm that this works as expected, evaluate a sentence twice. First, alone so there's no padding to mask
Step13: Now, evaluate it again in a batch with a longer sentence. The result should be identical
Step14: Compile the Keras model to configure the training process
Step15: Train the model
Step16: Run a prediction on a new sentence
Step17: Stack two or more LSTM layers
|
10,324
|
<ASSISTANT_TASK:>
Python Code:
class EpsGreedy:
    """Epsilon-greedy multi-armed bandit strategy.

    With probability ``eps`` a random arm is explored; otherwise the arm
    with the highest empirical mean payoff so far is exploited.
    """

    def __init__(self, n_arms, eps=0):
        self.eps = eps
        self.n_arms = n_arms
        # Cumulative reward and pull count per arm.
        self.payoffs = np.zeros(n_arms)
        self.n_plays = np.zeros(n_arms)

    def play(self):
        """Return the index of the arm to pull next."""
        # Note that the theory tells us to pick epsilon as O(1/t), not
        # constant (which we use here).
        least_played = np.argmin(self.n_plays)
        if self.n_plays[least_played] == 0:
            # Make sure every arm has been sampled at least once first.
            return least_played
        if np.random.rand() <= self.eps:
            # Explore: pick an arm uniformly at random.
            return np.random.randint(self.n_arms)
        # Exploit: pick the arm with the best empirical mean payoff.
        return np.argmax(self.payoffs / self.n_plays)

    def feedback(self, arm, reward):
        """Record the reward observed after pulling ``arm``."""
        self.payoffs[arm] += reward
        self.n_plays[arm] += 1
class UCB:
    """Upper Confidence Bound (UCB) bandit strategy.

    ``tau`` scales the width of the confidence bound; the classic UCB1
    algorithm corresponds to tau=1.
    """

    def __init__(self, n_arms, tau):
        self.n_arms = n_arms
        # Running empirical mean reward per arm.
        self.means = np.zeros(n_arms)
        # Note that the UCB1 algorithm has tau=1.
        self.n_plays = np.zeros(n_arms)
        self.tau = tau
        self.t = 0

    def play(self, plot=True):
        # If plot is true, it will plot the means + bounds every 100 iterations.
        self.t += 1
        least_played = np.argmin(self.n_plays)
        if self.n_plays[least_played] == 0:
            # Every arm must be pulled once before the bound is defined.
            return least_played
        bound = self.tau * np.sqrt(2 * np.log(self.t) / self.n_plays)
        scores = self.means + bound
        chosen = np.argmax(scores)
        if plot and self.t % 100 == 0:
            plt.errorbar(list(range(self.n_arms)), self.means, yerr=bound)
            plt.show()
            print('chose arm', chosen)
        return chosen

    def feedback(self, arm, reward):
        """Incrementally fold ``reward`` into the running mean of ``arm``."""
        self.n_plays[arm] += 1
        self.means[arm] += 1 / (self.n_plays[arm]) * (reward - self.means[arm])
@interact(n_arms=(10, 100, 1), n_rounds=(100, 1000, 10), eps=(0, 1, .01) , tau=(0, 1, .01))
def run(n_arms, n_rounds, eps, tau):
    """Interactive driver: sample random arm rewards and run both bandit strategies.

    Parameters are bound to ipywidgets sliders via @interact; the fixed
    seed makes the reward vector reproducible across slider changes.
    """
    np.random.seed(123)
    # Initialize the arm payoffs.
    mu = np.random.randn(n_arms)
    # Some other strategies for sampling.
    # mu = np.random.standard_cauchy(n_arms)
    # mu = np.random.gamma(shape=.1, size=(n_arms, 1))
    # Normalize the rewards into (0, 1].
    mu = np.abs(mu)
    mu /= np.max(mu)
    plt.bar(list(range(n_arms)), mu)
    plt.xlabel('arms')
    plt.ylabel('rewards')
    plt.show()
    bandits = {
        'eps-{0}'.format(eps) : EpsGreedy(n_arms, eps=eps),
        'ucb-{0}'.format(tau) : UCB(n_arms, tau=tau)
    }
    # `play` is defined elsewhere in the notebook — presumably it runs
    # n_rounds pulls for each bandit against reward vector mu; confirm.
    play(bandits, n_rounds, mu)
    # Hint: You can also plot the upper bound from UCB1 and see how tight it is.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: UCB1
|
10,325
|
<ASSISTANT_TASK:>
Python Code:
#imports
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import pylab as pl
import numpy as np
%matplotlib inline
#Read in data from source
df_raw = pd.read_csv("../assets/admissions.csv")
print df_raw.head()
df_raw['admit'].count()
df_raw['gpa'].count()
df_raw.shape
rows,columns = df_raw.shape
print(rows)
print(columns)
#function
def summary_table():
    """Return a table of summary statistics for the df_raw dataset."""
    # The original discarded the result of .describe() and returned the
    # undefined name `x` (and then used Python-2 `print X`); return the
    # summary table instead so callers can display it.
    return df_raw.describe()
print(summary_table())
df_raw.describe()
df_raw.dropna()
#drops any missing data rows from admissions.csv dataset
#returns 397 observations (complete observation rows) across 4 columns
#3 rows had missing, incomplete, NaN data present
#boxplot for GRE column data
df_raw.boxplot(column = 'gre', return_type = 'axes')
#boxplot for GPA column data
df_raw.boxplot(column = 'gpa', return_type = 'axes')
# distribution plot of 'admit' variable with mean
df_raw.admit.plot(kind = 'density', sharex = False, sharey = False, figsize = (10,4));plt.legend(loc='best')
#
plt.vlines(df_raw.admit.mean(), # Plot black line at mean
ymin=0,
ymax=2.0,
linewidth=4.0)
# distribution plot of 'gre' variable with mean
df_raw.gre.plot(kind = 'density', sharex = False, sharey = False, figsize = (10,4));plt.legend(loc='best')
#
plt.vlines(df_raw.gre.mean(), # Plot black line at mean
ymin=0,
ymax=0.0035,
linewidth=4.0)
# distribution plot of 'gpa' variable with mean
df_raw.gpa.plot(kind = 'density', sharex = False, sharey = False, figsize = (10,4));plt.legend(loc='best')
#
plt.vlines(df_raw.gpa.mean(), # Plot black line at mean
ymin=0,
ymax=1.0,
linewidth=4.0)
# distribution plot of 'prestige' variable with mean
df_raw.prestige.plot(kind = 'density', sharex = False, sharey = False, figsize = (10,4));plt.legend(loc='best')
#
plt.vlines(df_raw.prestige.mean(), # Plot black line at mean
ymin=0,
ymax=0.6,
linewidth=4.0)
# correlation matrix for variables in df_raw
df_raw.corr()
#utilized this stackoverflow.com resource to attempt to impute missing data
#(http://stackoverflow.com/questions/21050426/pandas-impute-nans)
#data imputation for variable 'admit'
#first commented out line of code will not run. Had errors with "keys" in ...df_raw.groupby('keys')...
#df_raw['admit'].fillna(df_raw.groupby('keys')['admit'].transform('mean'), inplace = True)
df_raw['admit'].fillna(df_raw['admit'].mean(), inplace = True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step1: Step 2
Step2: Questions
Step3: Answer
Step4: Question 3. Why would GRE have a larger STD than GPA?
Step5: Question 5. Confirm that you dropped the correct data. How can you tell?
Step6: Question 7. What do these plots show?
Step7: Question 9. If our model had an assumption of a normal distribution would we meet that requirement?
Step8: Question 12. What did you find?
|
10,326
|
<ASSISTANT_TASK:>
Python Code:
## Example from PEP 0255
def fib():
a, b = 0, 1
while 1:
yield b
a, b = b, a + b
gen1 = fib()
# prints the first 10 fibonnaci numbers
for i in range(10):
print(next(gen1), end=', ')
print("\nPassed!")
def nsquared(n):
    """Yield n**2, (n-1)**2, ..., 1**2, counting down from n.

    Yields nothing for n <= 0.  (The original only stopped when the
    counter hit exactly 0, so n <= 0 looped forever.)  Exhausting the
    generator raises StopIteration, the correct way to terminate one.
    """
    while n > 0:
        yield n ** 2
        n = n - 1
gen2 = nsquared(10)
for i in gen2:
print(i, end=', ')
try:
next(gen2) # should raise a StopIteration exception
except StopIteration:
print("\nWe hit the the end of the generator, no more elements can be consumed")
except Exception as e:
print("\nOops! Unexpected error", e)
finally:
print("Passed !")
squared_sum1 = sum([i**2 for i in range(1000001)])
print(squared_sum1)
gen3 = nsquared(1000000)
squared_sum2 = sum(gen3)
print(squared_sum2)
assert squared_sum1 == squared_sum1, "Sums are not equal !"
print("Passed !")
gen4 = nsquared(10)
print(gen4)
gen5 = (i**2 for i in range(11))
print(gen5)
# note that the generator has to be reinitialized once it has been consumed
gen4 = nsquared(10)
print(tuple(gen4))
gen4 = nsquared(10)
print(list(gen4))
gen4 = nsquared(10)
print(set(gen4))
print(tuple(i**2 for i in range(11)))
print(list(i**2 for i in range(11)))
print(set(i**2 for i in range(11)))
import numpy as np
print(list(i**2 for i in range(11) if i <=5))
print(list(i**2 if i <=5 else 1 for i in range(11)))
mat = list(i**2 + j**2 if i < j else i + j for i in range(3) for j in range(3))
print(np.array(mat).reshape(3,3))
# Same function, redefined here for clarity
def fib(n):
    """Yield the first n Fibonacci numbers: 1, 1, 2, 3, 5, ..."""
    prev, curr = 0, 1
    emitted = 0
    while True:
        yield curr
        emitted += 1
        if emitted == n:
            return
        prev, curr = curr, prev + curr
def geom(n):
    """Yield the first n powers of two: 1, 2, 4, 8, ..."""
    value = 1
    emitted = 0
    while True:
        yield value
        emitted += 1
        if emitted == n:
            return
        value = value * 2
def constant(n):
    """Yield the constant -1 exactly n times."""
    emitted = 0
    while True:
        yield -1
        emitted += 1
        if emitted == n:
            return
def master_sequence(n):
    """Yield n Fibonacci numbers, then n powers of two, then n copies of -1.

    Demonstrates ``yield from`` delegation to sub-generators; the full
    sequence has length 3 * n.  (Removed an unused ``count`` local from
    the original.)
    """
    g1 = fib(n)
    g2 = geom(n)
    g3 = constant(n)
    yield from g1
    yield from g2
    yield from g3
master_gen = master_sequence(5) # creates a sequence of length 15
print(list(master_gen))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is a generator that yields the infinite Fibonnaci sequence. With every call to fib after the first call, the state of the generator gets updated and the value of b is returned.
Step2: This example shows how you can represent an infinite sequence in Python without using up all the memory in the world. Next, we will look at a more practical example.
Step3: Now, suppose you want to find the sum of squares of the first 1,000,000 (1 million) integers. You don't believe the analytical formula and want to calculate it directly by summing up all the requisite squares of integers. It is not memory efficient to create a list of 1 million integers just to compute a sum. This is where our custom generator comes to our rescue.
Step4: Although both snippets of code give the same result, the implementation with the generator is more scalable since it uses constant memory.
Step5: Both generators and generator expressions can be passed to the tuple, set or list constructors to create equivalent tuples, sets or lists.
Step6: All the rules discussed in the previous sections about conditionals also apply to generator expressions
Step7: Advanced generator stuff
Step8: Now, we define our master generator.
|
10,327
|
<ASSISTANT_TASK:>
Python Code:
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.conv_learner import *
import torch
torch.cuda.is_available()
PATH = '../data/planet/'
ls {PATH}
!ls {PATH}train-jpg/ | wc -l
!ls {PATH}test-jpg/ | wc -l
from fastai.plots import *
def get_1st(path):
    """Return the first file with an extension found directly under `path`."""
    matches = glob(f'{path}/*.*')
    return matches[0]
dc_path = "../data/dogscats/valid/"
list_paths = [get_1st(f"{dc_path}cats"), get_1st(f"{dc_path}dogs")]
plots_from_files(list_paths, titles=["cat", "dog"], maintitle="Single-label classification")
list_paths = [f"{PATH}train-jpg/train_0.jpg", f"{PATH}train-jpg/train_1.jpg"]
titles=["haze primary", "agriculture clear primary water"]
plots_from_files(list_paths, titles=titles, maintitle="Multi-label classification")
from helper.planet import f2
metrics=[f2]
f_model = resnet34
label_csv = f'{PATH}train_v2.csv'
n = len(list(open(label_csv)))-1
val_idxs = get_cv_idxs(n)
def get_data(sz):
    """Build a fastai ImageClassifierData for the planet dataset at image size `sz`.

    Uses top-down augmentations (vertical flips included), appropriate
    for satellite imagery where orientation carries no meaning.
    Labels come from the multi-label CSV; `val_idxs` fixes the
    validation split so it is identical across image sizes.
    """
    tfms = tfms_from_model(f_model, sz, aug_tfms=transforms_top_down, max_zoom=1.05)
    return ImageClassifierData.from_csv(PATH, 'train-jpg', label_csv, tfms=tfms,
                    suffix='.jpg', val_idxs=val_idxs, test_name='test-jpg')
data = get_data(256)
data.classes
x,y = next(iter(data.val_dl))
x.shape
y.shape
sample = to_np(x[0])
sample_denorm = data.val_ds.denorm(sample)[0]
plt.imshow(sample_denorm)
list(zip(data.classes, y[0]))
plt.imshow(data.val_ds.denorm(to_np(x))[0]*1.5);
sz=64
data = get_data(sz)
data = data.resize(int(sz*1.3), 'tmp')
x,y = next(iter(data.trn_dl))
x.shape, y.shape
sample = to_np(x[0])
plt.imshow(data.val_ds.denorm(sample)[0]*1.2)
metrics
learn = ConvLearner.pretrained(f_model, data, metrics=metrics)
lrf=learn.lr_find()
learn.sched.plot()
learn.sched.plot_loss()
def plot_loss_change(sched, sma=1, n_skip=20, y_lim=(-0.01,0.01)):
    """Plots rate of change of the loss function.

    Parameters:
        sched - learning rate scheduler, an instance of LR_Finder class.
        sma - number of batches for simple moving average to smooth out the curve.
        n_skip - number of batches to skip on the left.
        y_lim - limits for the y axis.
    """
    # The original body read the global `learn.sched` instead of the
    # `sched` parameter, silently ignoring the argument; fixed here.
    # Pad with zeros so `derivatives` lines up index-for-index with lrs.
    derivatives = [0] * (sma + 1)
    for i in range(1 + sma, len(sched.lrs)):
        # Finite-difference slope of the loss over the last `sma` batches.
        derivative = (sched.losses[i] - sched.losses[i - sma]) / sma
        derivatives.append(derivative)
    plt.ylabel("d/loss")
    plt.xlabel("learning rate (log scale)")
    plt.plot(sched.lrs[n_skip:], derivatives[n_skip:])
    plt.xscale('log')
    plt.ylim(y_lim)
lr = 0.1
learn.fit(lr, 3, cycle_len=1, cycle_mult=2)
lrs = np.array([lr/9,lr/3,lr])
learn.unfreeze()
learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)
learn.save(f'{sz}')
learn.sched.plot_loss()
learn.sched.plot_lr()
sz=128
learn.set_data(get_data(sz))
learn.freeze()
learn.fit(lr, n_cycle=3, cycle_len=1, cycle_mult=2)
learn.unfreeze()
learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)
learn.save(f'{sz}')
sz=256
learn.set_data(get_data(sz))
learn.freeze()
learn.fit(lr, 3, cycle_len=1, cycle_mult=2)
learn.unfreeze()
learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)
learn.save(f'{sz}')
learn.models_path
learn.load(f'{sz}')
multi_preds, y = learn.TTA()
multi_preds.shape, y.shape
multi_preds[:,0,:]
y[0,:]
preds = np.mean(multi_preds, 0)
preds[0,:]
plt.plot(y[0,:])
plt.plot(preds[0,:])
f2(preds,y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multi-label versus single-label classification
Step2: In single-label classification each sample belongs to one class. In the previous example, each image is either a dog or a cat.
Step3: In multi-label classification each sample can belong to one or more clases. In the previous example, the first images belongs to two clases
Step4: We use a different set of data augmentations for this dataset - we also allow vertical flips, since we don't expect vertical orientation of satellite images to change our classifications.
Step5: Begin Training with 64 x 64
Step7: Find Optimal Learning Rate
Step8: Looking at the above plot, we can see that 0.1 seems the best learning rate.
Step9: Unfreeze and Train with differential learning rate
Step10: Begin Training with 128 x 128
Step11: Freeze for Transfer Learning
Step12: F2 Score not improving
Step13: TODO
Step14:
Step15: Sample Predictions
|
10,328
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Restored the docstring delimiters lost in extraction (the bare
    # text lines were invalid Python).  Build both directions of the
    # mapping in one pass over the deduplicated vocabulary.
    vocab = set(text)
    vocab_to_int = {word: idx for idx, word in enumerate(vocab)}
    int_to_vocab = {idx: word for word, idx in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Restored the docstring delimiters lost in extraction.  Tokens are
    # wrapped in '||' so they cannot collide with real vocabulary words.
    return {
        '.': '||period||',
        ',': '||comma||',
        '"': '||quotation||',
        ';': '||semicolon||',
        '!': '||exclamation||',
        '?': '||question||',
        '(': '||leftp||',
        ')': '||rightp||',
        '--': '||dash||',
        '\n': '||return||'
    }
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # TODO: Implement Function
    # Named placeholders so they can be retrieved from the loaded graph
    # by name at generation time.  `input` shadows the builtin but the
    # original local name is kept.
    input = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return input, targets, learning_rate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    # TODO: Implement Function
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    # NOTE(review): [lstm]*2 stacks the *same* cell object twice, so both
    # layers would share state/variables — TF generally expects a fresh
    # cell instance per layer; confirm this is intended.
    cell = tf.contrib.rnn.MultiRNNCell([lstm]*2)
    initial_state = cell.zero_state(batch_size, tf.float32)
    # Name the state tensor so it can be looked up from the loaded graph.
    initial_state = tf.identity(initial_state, name='initial_state')
    return cell, initial_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # TODO: Implement Function
    # Trainable embedding matrix initialized uniformly in [-1, 1);
    # lookup maps each word id to its embed_dim-sized vector.
    embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
    embed = tf.nn.embedding_lookup(embedding, input_data)
    return embed
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # TODO: Implement Function
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fed back in during generation.
    final_state = tf.identity(final_state, name='final_state')
    return outputs, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
import math
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # TODO: Implement Function
    # embedding layer
    embed = get_embed(input_data, vocab_size, embed_dim)
    # lstm layer
    outputs, final_state = build_rnn(cell, embed)
    # fully connected layer
    # No activation: raw logits feed the softmax / sequence_loss outside.
    #stddev = 1. / math.sqrt(embed_dim)
    logits = tf.contrib.layers.fully_connected(
        outputs,
        vocab_size,
        activation_fn=None,
        weights_initializer=tf.truncated_normal_initializer(stddev=0.1)
    )
    return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
    """Split `int_text` into training batches of inputs and targets.

    :param int_text: Text with the words replaced by their ids.
    :param batch_size: Number of sequences per batch.
    :param seq_length: Number of word ids per sequence.
    :return: Numpy array shaped (n_batches, 2, batch_size, seq_length) of
             [input, target] pairs; targets are the inputs shifted one step,
             with the very first word wrapped around to the last slot.
    """
    # Keep only enough words to fill a whole number of batches.
    tokens_per_batch = batch_size * seq_length
    usable = (len(int_text) // tokens_per_batch) * tokens_per_batch
    inputs = np.array(int_text[:usable])

    # Targets are the inputs shifted left by one; the first word wraps around.
    labels = np.zeros_like(inputs)
    labels[:-1], labels[-1] = inputs[1:], inputs[0]

    # One row per sequence slot within a batch.
    inputs = inputs.reshape(batch_size, -1)
    labels = labels.reshape(batch_size, -1)

    # Carve seq_length-wide windows out of every row to form the batches.
    pairs = [[inputs[:, start:start + seq_length],
              labels[:, start:start + seq_length]]
             for start in range(0, inputs.shape[1], seq_length)]
    return np.array(pairs)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
num_epochs = 300
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 256
# Sequence Length
seq_length = 64
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 100
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
    """Fetch the tensors needed for text generation from a restored graph.

    :param loaded_graph: TensorFlow graph loaded from a checkpoint.
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor).
    """
    # These names were assigned when the training graph was built.
    wanted = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in wanted)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
    """Pick the next word greedily from a probability distribution.

    :param probabilities: Probabilities of the next word, one per word id.
    :param int_to_vocab: Dictionary mapping word ids to words.
    :return: The word with the highest probability.
    """
    # Greedy choice: argmax returns the first index on ties.
    most_likely = int(np.argmax(probabilities))
    return int_to_vocab[most_likely]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
10,329
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
init_printing(use_latex=True)
from IPython.display import Latex
%matplotlib inline
x, w2 = symbols('x omega^2')
L, m, EJ = symbols('L m EJ', positive = True)
A, B, C, D, ld, LD = symbols('A B C D lambda Lambda')
f, φ = symbols('f phi')
mass_coeff = 8
stiff_coeff = 24
k = stiff_coeff*EJ/L**3
M = mass_coeff*m*L
f0 = A*cos(ld*x) + B*sin(ld*x) + C*cosh(ld*x) + D*sinh(ld*x)
f1 = f0.diff(x)
f2 = f1.diff(x)
f3 = f2.diff(x)
display(Eq(φ,f0))
AC = solve((f0.subs(x,0), f2.subs(x,0)), A, C, dict=True)
f0, f1, f2, f3 = [f.subs(AC[0]) for f in (f0, f1, f2, f3)]
display(Eq(φ, f0))
D = solve(f2.subs(x, L), D, dict=True)
f0, f1, f2, f3 = [f.subs(D[0]).subs(L,LD/ld) for f in (f0, f1, f2, f3)]
display(Latex('With $\\Lambda = \\lambda\\,L$ it is'))
display(Eq(φ, f0.simplify()))
eq = (f0*k - f0*M*ld**4*EJ/m - EJ*f3).subs(x, L).subs(L, LD/ld)
display(Eq(eq.expand().collect(B).collect(ld).collect(EJ), 0))
eq = (eq/EJ/ld**3/B).expand()
display(Eq(eq,0))
display(Eq(symbols('f'), (eq*LD**2).expand()))
plot(eq*LD**2, (LD, 0, 2));
display(Eq(symbols('f'), (eq/LD).expand()))
plot(eq/LD, (LD, 2, 10));
from scipy.optimize import bisect
f = lambdify(LD, eq, modules='math')
l1 = bisect(f, 0.5, 1.5)
Latex(r'$\lambda_1=%.6f\,\frac{1}{L}, \quad\omega_1^2=%.6f\,\frac{EJ}{mL^4}$'%(l1, l1**4))
display(Latex(r'$\omega^2_{R00} = %.3f\,\frac{EJ}{mL^4}$'%(3*24/25)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Symbols
Step2: Supported mass and stiffness of support
Step3: General solution and its derivatives
Step4: Left boundary conditions
Step5: First, simpler boundary condition at the right end, $x=L$.
Step6: Last boundary conditions, equation of wave numbers
Step7: We have a non trivial solution when the term in brackets is equal to zero, to have the bracketed term we must divide both members by $B\,EJ\, \lambda^3$
Step8: The behavior near $\Lambda=0$ is led by the last term that goes like $48/\Lambda^2$, so to have a nice plot we multiply everything by $\Lambda^2$
Step9: and see that there is a root between 1.25 and 1.5. If we were interested in upper roots, we can observe that all the terms in the LHS of our determinantal equations are bounded for increasing $\Lambda$ except for the first one, that grows linearly, so to investigate the other roots we may divide the equation by $\Lambda$ to remove that trend...
Step10: All the RHS terms except the first have $\Lambda$ in the denominator and are bounded, so the asymptotic behaviour is controlled by $\Lambda_{n+1}=n\pi$.
Step11: Rayleigh Quotient
|
10,330
|
<ASSISTANT_TASK:>
Python Code:
Instructions:
+ Print out the second element from the areas list, so 11.25.
+ Subset and print out the last element of areas, being 9.50.
- Using a negative index makes sense here!
+ Select the number representing the area of the living room
and print it out.
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0,
"bedroom", 10.75, "bathroom", 9.50]
# Print out second element from areas
print( areas[ 1 ] )
# Print out last element from areas
print( areas[ -1 ] )
# Print out the area of the living room
print( areas[ 5 ] )
Instructions:
+ Using a combination of list subsetting and variable assignment,
create a new variable, eat_sleep_area, that contains the sum of the area
of the kitchen and the area of the bedroom.
+ Print this new variable "eat_sleep_area".
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75,
"bathroom", 9.50]
# Sum of kitchen and bedroom area: eat_sleep_area
eat_sleep_area = areas[3] + areas[7]
# Print the variable eat_sleep_area
print( eat_sleep_area )
Instructions:
+ Use slicing to create a list, "downstairs", that contains the first
6 elements of "areas".
+ Do a similar thing to create a new variable, "upstairs", that contains
the last 4 elements of areas.
+ Print both "downstairs" and "upstairs" using print().
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom",
10.75, "bathroom", 9.50]
# Use slicing to create downstairs
downstairs = areas[0:7]
# Use slicing to create upstairs
upstairs = areas[6:]
# Print out downstairs and upstairs
print( downstairs )
print( upstairs )
Instructions:
+ Use slicing to create the lists, "downstairs" and "upstairs" again.
- Without using any indexes, unless nessecery.
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Alternative slicing to create downstairs
downstairs = areas[ : 6]
# Alternative slicing to create upstairs
upstairs = areas[ 6 : ]
Problem definition: What will house[-1][1] return?
# Ans : float, 9.5 as the bathroom area.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Subsetting Lists
Step3: 2. Subset and calculate
Step5: 3. Slicing and dicing
Step7: 4. Slicing and dicing (2)
Step9: 5. Subsetting lists of lists
|
10,331
|
<ASSISTANT_TASK:>
Python Code:
# import libraries
import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# percentage of training set to use as validation
valid_size = 0.2
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# obtain training indices that will be used for validation
num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
# define samplers for obtaining training and validation batches
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
sampler=valid_sampler, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
    """Two-hidden-layer fully connected classifier for 28x28 MNIST digits."""

    def __init__(self):
        super(Net, self).__init__()
        # Both hidden layers have 512 units.
        n_hidden = 512
        # Attribute names fc1/fc2/fc3/dropout are kept so previously
        # saved state_dicts remain loadable.
        self.fc1 = nn.Linear(28 * 28, n_hidden)
        self.fc2 = nn.Linear(n_hidden, n_hidden)
        self.fc3 = nn.Linear(n_hidden, 10)
        # Dropout regularises the hidden activations (p = 0.2).
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        # Flatten each image into a 784-vector.
        h = x.view(-1, 28 * 28)
        # Two ReLU hidden layers, each followed by dropout.
        for hidden in (self.fc1, self.fc2):
            h = self.dropout(F.relu(hidden(h)))
        # Raw class scores; CrossEntropyLoss applies the softmax itself.
        return self.fc3(h)
# initialize the NN
model = Net()
print(model)
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# number of epochs to train the model
n_epochs = 50
# initialize tracker for minimum validation loss
valid_loss_min = np.Inf # set initial "min" to infinity
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
valid_loss = 0.0
###################
# train the model #
###################
model.train() # prep model for training
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
######################
# validate the model #
######################
model.eval() # prep model for evaluation
for data, target in valid_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update running validation loss
valid_loss += loss.item()*data.size(0)
# print training/validation statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
valid_loss = valid_loss/len(valid_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1,
train_loss,
valid_loss
))
# save model if validation loss has decreased
if valid_loss <= valid_loss_min:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
valid_loss))
torch.save(model.state_dict(), 'model.pt')
valid_loss_min = valid_loss
model.load_state_dict(torch.load('model.pt'))
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(batch_size):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and Visualize the Data
Step2: Visualize a Batch of Training Data
Step3: View an Image in More Detail
Step4: Define the Network Architecture
Step5: Specify Loss Function and Optimizer
Step6: Train the Network
Step7: Load the Model with the Lowest Validation Loss
Step8: Test the Trained Network
Step9: Visualize Sample Test Results
|
10,332
|
<ASSISTANT_TASK:>
Python Code:
import sys
try:
import docplex.mp
except:
raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
try:
import cplex
except:
raise Exception('Please install CPLEX. See https://pypi.org/project/cplex/')
import numpy as np
gas_names = ["super", "regular", "diesel"]
gas_data = np.array([[3000, 70, 10, 1], [2000, 60, 8, 2], [1000, 50, 6, 1]])
oil_names = ["crude1", "crude2", "crude3"]
oil_data = np.array([[5000, 45, 12, 0.5], [5000, 35, 6, 2], [5000, 25, 8, 3]])
nb_gas = len(gas_names)
nb_oils = len(oil_names)
range_gas = range(nb_gas)
range_oil = range(nb_oils)
print("Number of gasoline types = {0}".format(nb_gas))
print("Number of crude types = {0}".format(nb_oils))
# global data
production_cost = 4
production_max = 14000
# each $1 spent on advertising increases demand by 10.
advert_return = 10
import pandas as pd
gaspd = pd.DataFrame([(gas_names[i],int(gas_data[i][0]),int(gas_data[i][1]),int(gas_data[i][2]),int(gas_data[i][3]))
for i in range_gas])
oilpd = pd.DataFrame([(oil_names[i],int(oil_data[i][0]),int(oil_data[i][1]),int(oil_data[i][2]),oil_data[i][3])
for i in range_oil])
gaspd.columns = ['name','demand','price','octane','lead']
oilpd.columns= ['name','capacity','price','octane','lead']
CSS =
body {
margin: 0;
font-family: Helvetica;
}
table.dataframe {
border-collapse: collapse;
border: none;
}
table.dataframe tr {
border: none;
}
table.dataframe td, table.dataframe th {
margin: 0;
border: 1px solid white;
padding-left: 0.25em;
padding-right: 0.25em;
}
table.dataframe th:not(:empty) {
background-color: #fec;
text-align: left;
font-weight: normal;
}
table.dataframe tr:nth-child(2) th:empty {
border-left: none;
border-right: 1px dashed #888;
}
table.dataframe td {
border: 2px solid #ccf;
background-color: #f4f4ff;
}
table.dataframe thead th:first-child {
display: none;
}
table.dataframe tbody th {
display: none;
}
from IPython.core.display import HTML
HTML('<style>{}</style>'.format(CSS))
from IPython.display import display
print("Gas data:")
display(gaspd)
print("Oil data:")
display(oilpd)
from docplex.mp.model import Model
mdl = Model(name="oil_blending")
blends = mdl.continuous_var_matrix(keys1=nb_oils, keys2=nb_gas, lb=0)
adverts = mdl.continuous_var_list(nb_gas, lb=0)
# gasoline demand is numpy array field #0
mdl.add_constraints(mdl.sum(blends[o, g] for o in range(nb_oils)) == gas_data[g][0] + advert_return * adverts[g]
for g in range(nb_gas))
mdl.print_information()
mdl.add_constraints(mdl.sum(blends[o,g] for g in range_gas) <= oil_data[o][0]
for o in range_oil)
mdl.print_information()
# minimum octane level
# octane is numpy array field #2
mdl.add_constraints(mdl.sum(blends[o,g]*(oil_data[o][2] - gas_data[g][2]) for o in range_oil) >= 0
for g in range_gas)
# maximum lead level
# lead level is numpy array field #3
mdl.add_constraints(mdl.sum(blends[o,g]*(oil_data[o][3] - gas_data[g][3]) for o in range_oil) <= 0
for g in range_gas)
mdl.print_information()
# -- maximum global production
mdl.add_constraint(mdl.sum(blends) <= production_max)
mdl.print_information()
# KPIs
total_advert_cost = mdl.sum(adverts)
mdl.add_kpi(total_advert_cost, "Total advertising cost")
total_oil_cost = mdl.sum(blends[o,g] * oil_data[o][1] for o in range_oil for g in range_gas)
mdl.add_kpi(total_oil_cost, "Total Oil cost")
total_production_cost = production_cost * mdl.sum(blends)
mdl.add_kpi(total_production_cost, "Total production cost")
total_revenue = mdl.sum(blends[o,g] * gas_data[g][1] for g in range(nb_gas) for o in range(nb_oils))
mdl.add_kpi(total_revenue, "Total revenue")
# finally the objective
mdl.maximize(total_revenue - total_oil_cost - total_production_cost - total_advert_cost)
assert mdl.solve(), "Solve failed"
mdl.report()
all_kpis = [(kp.name, kp.compute()) for kp in mdl.iter_kpis()]
kpis_bd = pd.DataFrame(all_kpis, columns=['kpi', 'value'])
blend_values = [ [ blends[o,g].solution_value for g in range_gas] for o in range_oil]
total_gas_prods = [sum(blend_values[o][g] for o in range_oil) for g in range_gas]
prods = list(zip(gas_names, total_gas_prods))
prods_bd = pd.DataFrame(prods)
%matplotlib inline
import matplotlib.pyplot as plt
def display_pie(pie_values, pie_labels, colors=None,title=''):
    """Show a pie chart of *pie_values* labelled by *pie_labels*.

    colors: optional list of matplotlib colour names, one per slice.
    title: chart title (empty by default).
    """
    # Equal axis scaling keeps the pie circular rather than elliptical.
    plt.axis("equal")
    # autopct prints each slice's share as a percentage with one decimal.
    plt.pie(pie_values, labels=pie_labels, colors=colors, autopct="%1.1f%%")
    plt.title(title)
    plt.show()
display_pie( [kpnv[1] for kpnv in all_kpis], [kpnv[0] for kpnv in all_kpis],title='KPIs: Revenue - Oil Cost - Production Cost')
display_pie(total_gas_prods, gas_names, colors=["green", "goldenrod", "lightGreen"],title='Gasoline Total Production')
sblends = [(gas_names[n], oil_names[o], round(blends[o,n].solution_value)) for n in range_gas for o in range_oil]
blends_bd = pd.DataFrame(sblends)
f, barplot = plt.subplots(1, figsize=(16,5))
bar_width = 0.1
offset = 0.12
rho = 0.7
# position of left-bar boundaries
bar_l = [o for o in range_oil]
mbar_w = 3*bar_width+2*max(0, offset-bar_width)
tick_pos = [b*rho + mbar_w/2.0 for b in bar_l]
colors = ['olive', 'lightgreen', 'cadetblue']
for i in range_oil:
barplot.bar([b*rho + (i*offset) for b in bar_l],
blend_values[i], width=bar_width, color=colors[i], label=oil_names[i])
plt.xticks(tick_pos, gas_names)
barplot.set_xlabel("gasolines")
barplot.set_ylabel("blend")
plt.legend(loc="upper right")
plt.title('Blend Repartition\n')
# Set a buffer around the edge
plt.xlim([0, max(tick_pos)+mbar_w +0.5])
plt.show()
print("* value of blend[crude2, diesel] is %g" % blends[1,2])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If CPLEX is not installed, you can install CPLEX Community edition.
Step2: Step 2
Step3: Step 3
Step5: Use basic HTML and a stylesheet to format the data.
Step6: Let's display the data we just prepared.
Step7: Step 4
Step8: Define the decision variables
Step9: We also have to decide how much should be spent in advertising for each time of gasoline. To do so, we will create a list of continuous variables, indexed by the gasolines.
Step10: Express the business constraints
Step11: Maximum capacity
Step12: Octane and Lead levels
Step13: Maximum total production
Step14: Express the objective
Step15: Solve with Decision Optimization
Step16: Step 5
Step17: Let's display some KPIs in pie charts using the Python package matplotlib.
Step18: Production
Step19: We see that the most produced gasoline type is by far regular.
Step20: Notice the missing bar for (crude2, diesel) which is expected since blend[crude2, diesel] is zero in the solution.
|
10,333
|
<ASSISTANT_TASK:>
Python Code:
# Third party python libraries
import numpy
# Try and import from "The Virtual Brain"
from tvb.simulator.lab import *
from tvb.datatypes.time_series import TimeSeriesRegion
import tvb.analyzers.fmri_balloon as bold
from tvb.simulator.plot import timeseries_interactive as timeseries_interactive
LOG.info("Configuring...")
#Initialise a Model, Coupling, and Connectivity.
lb = models.LarterBreakspear(QV_max=1.0, QZ_max=1.0,
d_V=0.65, d_Z=0.65,
aee=0.36, ani=0.4, ane=1.0, C=0.1)
lb.variables_of_interest = ["V", "W", "Z"]
white_matter = connectivity.Connectivity(load_default=True)
white_matter.speed = numpy.array([7.0])
white_matter_coupling = coupling.HyperbolicTangent(a=0.5*lb.QV_max,
midpoint=lb.VT,
sigma=lb.d_V)
#Initialise an Integrator
heunint = integrators.HeunDeterministic(dt=0.2)
#Initialise some Monitors with period in physical time
mon_tavg = monitors.TemporalAverage(period=2.)
mon_bold = monitors.Bold(period=2000.)
#Bundle them
what_to_watch = (mon_bold, mon_tavg)
#Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim = simulator.Simulator(model = lb,
connectivity = white_matter,
coupling = white_matter_coupling,
integrator = heunint,
monitors = what_to_watch)
sim.configure()
LOG.info("Starting simulation...")
#Perform the simulation
bold_data, bold_time = [], []
tavg_data, tavg_time = [], []
for raw, tavg in sim(simulation_length=480000):
if not raw is None:
bold_time.append(raw[0])
bold_data.append(raw[1])
if not tavg is None:
tavg_time.append(tavg[0])
tavg_data.append(tavg[1])
LOG.info("Finished simulation.")
#Make the lists numpy.arrays for easier use.
LOG.info("Converting result to array...")
TAVG_TIME = numpy.array(tavg_time)
BOLD_TIME = numpy.array(bold_time)
BOLD = numpy.array(bold_data)
TAVG = numpy.array(tavg_data)
#Create TimeSeries instance
tsr = TimeSeriesRegion(data = TAVG,
time = TAVG_TIME,
sample_period = 2.)
tsr.configure()
#Create and run the monitor/analyser
bold_model = bold.BalloonModel(time_series = tsr)
bold_data = bold_model.evaluate()
bold_tsr = TimeSeriesRegion(connectivity = white_matter,
data = bold_data.data,
time = bold_data.time)
#Prutty puctures...
tsi = timeseries_interactive.TimeSeriesInteractive(time_series = bold_tsr)
tsi.configure()
tsi.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Perform the simulation
Step2: Plot pretty pictures of what we just did
|
10,334
|
<ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.2,<2.3"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
print(b.get_parameter(qualifier='ecc'))
print(b.get_parameter(qualifier='ecosw', context='component'))
print(b.get_parameter(qualifier='esinw', context='component'))
print(b.get_parameter(qualifier='ecosw', context='constraint'))
print(b.get_parameter(qualifier='esinw', context='constraint'))
b.add_dataset('mesh', times=np.linspace(0,1,11), columns=['volume'])
b.set_value('ecc', 0.2)
b.run_compute()
print(b['volume@primary@model'])
afig, mplfig = b['mesh01'].plot(x='times', y='volume', show=True)
b.remove_dataset('mesh01')
b.add_dataset('rv', times=np.linspace(0,1,51))
b.run_compute()
afig, mplfig = b['rv@model'].plot(show=True)
b.remove_dataset('rv01')
b.add_dataset('lc', times=np.linspace(0,1,51))
b.run_compute()
afig, mplfig = b['lc@model'].plot(show=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
Step2: Relevant Parameters
Step3: Relevant Constraints
Step4: Influence on Meshes (volume conservation)
Step5: Influence on Radial Velocities
Step6: Influence on Light Curves (fluxes)
|
10,335
|
<ASSISTANT_TASK:>
Python Code:
while = 1
for i in range(1,10) print('Hello world')
for i in range(1,10):
x = 1 + i
print(x)
print("Some python issues are easier to find than others ')
print("Missing brackets can be difficult" , range(1,10)
print("to find. probably not these ones however.")
x = int(input("Please enter a number: "))
print(x)
while True:
try:
x = int(input("Please enter a number:"))
break
except ValueError:
print("oops!")
class MyInputError(Exception):
    """Exception raised for errors in the input.

    Attributes:
        dError -- internal error-code string attached to every instance.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Internal diagnostic code, fixed for this exception type.
        self.dError = "Internal Error 10"
try:
raise MyInputError("MyInputError")
raise AssertionError("Sample Assertion Error")
except MyInputError as err:
print('InputError caught:',err)
print('dError:',err.dError)
except AssertionError as err:
print('Assertion error caught:',err)
?type
try:
# raise MyInputError("New error, test finally")
#
except MyInputError as err:
print('MyInputError:',err)
print('dError:',err.dError)
finally:
print('Graceful Cleanup: Goodbye, world!')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Missing colon at the end of for, while, if, def statements.
Step2: Incorrect indentation. Don't mix spaces and tabs!
Step3: Mismatched quotation marks.
Step4: Unclosed brackets
Step5: Runtime errors
Step7: User-defined exceptions
Step8: Defining Clean-up Actions
|
10,336
|
<ASSISTANT_TASK:>
Python Code:
initial = [1, 0, 0, 0, 1, 0, 0, 0]
final = [0, 0, 0, 0, 1, 0, 1, 0]
# Importing necessary extensions
import numpy as np
import itertools
import functools
import operator
# The use of type annotations requires Python 3.6 or newer
from typing import List
# looking for the positions/levels with different occupation
changes = np.logical_xor(initial, final)
# obtaining the indexes of those positions
np.nonzero(changes)[0].tolist()
modulo = 2
np.unique(np.remainder(np.nonzero(changes), modulo)).size == 1
def is_allowed(initial: List[int], final: List[int]) -> bool:
Given an initial and final states as represented in a binary
list, returns if it is allowed considering spin conservation.
return np.unique(
np.remainder(
np.nonzero(
np.logical_xor(initial,final)), 2)).size == 1
is_allowed(initial, final)
is_allowed(initial, [0, 0, 0, 0, 0, 1, 1, 0])
def generate_states(electrons: int, levels: int) -> List[List[int]]:
Generates the list representation of a given number of electrons
and levels (degeneracy not considered).
# create an array of length equal to the amount of levels
# with an amount of 1's equal to the number of electrons
# specified which will be used as a seed/template
seed = [1 if position < electrons else 0
for position in range(levels)]
# taking the seed state, we generate all possible permutations
# and remove duplicates using a set operation
generated_states = list(set(itertools.permutations(seed)))
generated_states.sort(reverse=True)
return generated_states
states_d3 = generate_states(3,6)
states_d4 = generate_states(4,6)
states_d5 = generate_states(5,6)
states_d4
possible_states_d4 = [
# select states that fulfill
list(state) for state in states_d4
# dxy orbital double occupancy
if state[0]==1 and state[1]==1
# dzx/dyz orbital single occupancy
and state[2] is not state[3]
]
possible_states_d4
possible_states_d3 = [list(state) for state in states_d3
if state[0]==1 # xy up occupied
or state[1]==1] # xy down occupied
possible_states_d3
possible_states_d5 = [list(state) for state in states_d5
# xy up down occupied
if state[0]==1 and state[1]==1
]
possible_states_d5
def combine_states(first: List[List[int]],
second: List[List[int]]) -> List[List[int]]:
Takes two lists of list representations of states and returns
the list representation of a two-site state.
# Producing all the possible final states.
# This has to be read from bottom to top.
# 3) the single site representations are combined
# into one single two-site representation
# 2) we iterate over all the combinations produced
# 1) make the product of the given first and second
# states lists
final_states = [
functools.reduce(operator.add, combination) # 3)
for combination # 2)
in itertools.product(first, second) # 1)
]
final_states.sort(reverse=True)
return final_states
print("The number of combined states is: ",
len(combine_states(possible_states_d3,possible_states_d5)))
def label(initial, final, levels, mapping):
Helper function to label the levels/orbitals involved.
changes = np.nonzero(np.logical_xor(initial, final))
positions = np.remainder(changes, levels)//2
return f"{mapping[positions[0][0]]} and {mapping[positions[0][1]]}"
def transition(initial: List[int],
final: List[List[int]],
debug = False) -> None:
This function takes the list representation of an initial double
site state and a list of final d3 states of intrest.
Then, it computes if the transition from the given initial state
to a compounded d3d5 final states are possible.
The d5 states are implicitly used in the function from those
already generated and filtered.
def process(final_states):
# We iterate over all final states and test whether the
# transition from the given initial state is allowed
for state in final_states:
allowed = is_allowed(initial, state)
if allowed:
labeled = label(initial,
state,
6,
{0: "xy", 1: "xz", 2: "yz"})
print(f" final state {state} allowed \
between {labeled}.")
else:
if debug:
print(f" final state {state} not allowed.")
d5 = list(possible_states_d5)
print("From initial state {}".format(initial))
print("d3d5")
process(combine_states(final, d5))
print("d5d3")
process(combine_states(d5, final))
A2_32 = [[1,0,1,0,1,0]] # 4A2 Sz=3/2
A2_neg_32 = [[0,1,0,1,0,1]] # 4A2 Sz=-3/2
A2_12 = [[0,1,1,0,1,0], [1,0,0,1,1,0], [1,0,1,0,0,1]] # 4A2 Sz=1/2
A2_neg_12 = [[1,0,0,1,0,1], [0,1,1,0,0,1], [0,1,0,1,1,0]] # 4A2 Sz=-1/2
FM = [1,1,1,0,1,0,1,1,1,0,1,0]
AFM_up = [1,1,1,0,1,0,1,1,0,1,0,1]
AFM_down = [1,1,0,1,0,1,1,1,1,0,1,0]
transition(FM, A2_32)
transition(FM, A2_12)
transition(FM, A2_neg_12)
transition(AFM_up, A2_32)
transition(AFM_up, A2_12)
transition(AFM_up, A2_neg_12)
S0_1 = [1, 1, 1, 0, 0, 1] # |A>
S0_2 = [1, 1, 0, 1, 1, 0] # |B>
d_zero_down = [1, 1, 0, 1, 0, 1]
d_zero_up = [1, 1, 1, 0, 1, 0]
transition(S0_1 + d_zero_up, A2_32)
transition(S0_2 + d_zero_up, A2_32)
print("\n\n")
transition(S0_1 + d_zero_up, A2_neg_32)
transition(S0_2 + d_zero_up, A2_neg_32)
transition(S0_1 + d_zero_down, A2_32)
transition(S0_2 + d_zero_down, A2_32)
print("\n\n")
transition(S0_1 + d_zero_down, A2_neg_32)
transition(S0_2 + d_zero_down, A2_neg_32)
transition(d_zero_up + S0_1, A2_32)
transition(d_zero_up + S0_2, A2_32)
print("\n\n")
transition(d_zero_up + S0_1, A2_neg_32)
transition(d_zero_up + S0_2, A2_neg_32)
print("\n\n")
transition(d_zero_down + S0_1, A2_32)
transition(d_zero_down + S0_2, A2_32)
print("\n\n")
transition(d_zero_down + S0_1, A2_neg_32)
transition(d_zero_down + S0_2, A2_neg_32)
transition(S0_1 + d_zero_up, A2_12)
transition(S0_2 + d_zero_up, A2_12)
transition(S0_1 + d_zero_up, A2_neg_12)
transition(S0_2 + d_zero_up, A2_neg_12)
transition(d_zero_up + S0_1, A2_12)
transition(d_zero_up + S0_2, A2_12)
print("\n\n")
transition(d_zero_up + S0_1, A2_neg_12)
transition(d_zero_up + S0_2, A2_neg_12)
transition(S0_1 + d_zero_down, A2_12)
transition(S0_2 + d_zero_down, A2_12)
print("\n\n")
transition(d_zero_down + S0_1, A2_12)
transition(d_zero_down + S0_2, A2_12)
print("\n\n")
transition(S0_1 + d_zero_down, A2_neg_12)
transition(S0_2 + d_zero_down, A2_neg_12)
print("\n\n")
transition(d_zero_down + S0_1, A2_neg_12)
transition(d_zero_down + S0_2, A2_neg_12)
F = S0_1 + S0_1
G = S0_1 + S0_2
H = S0_2 + S0_1
I = S0_2 + S0_2
transition(F, A2_32)
transition(G, A2_32)
transition(H, A2_32)
transition(I, A2_32)
transition(F, A2_neg_32)
transition(G, A2_neg_32)
transition(H, A2_neg_32)
transition(I, A2_neg_32)
transition(F, A2_12)
transition(G, A2_12)
transition(H, A2_12)
transition(I, A2_12)
transition(F, A2_neg_12)
transition(G, A2_neg_12)
transition(H, A2_neg_12)
transition(I, A2_neg_12)
Ea = [[0,1,1,0,1,0], [1,0,0,1,1,0], [1,0,1,0,0,1]]
transition(AFM_down, Ea)
transition(AFM_up, Ea)
transition(FM, Ea)
Eb = [[1,0,1,0,0,1], [1,0,0,1,1,0]]
transition(AFM_down, Eb)
transition(AFM_up, Eb)
transition(FM, Eb)
transition(S0_1 + S0_1, Ea)
transition(S0_1 + S0_2, Ea)
transition(S0_2 + S0_1, Ea)
transition(S0_2 + S0_2, Ea)
transition(S0_1 + S0_1, Eb)
transition(S0_1 + S0_2, Eb)
transition(S0_2 + S0_1, Eb)
transition(S0_2 + S0_2, Eb)
S0_1 = [1, 1, 1, 0, 0, 1]
S0_2 = [1, 1, 0, 1, 1, 0]
transition(S0_1 + d_zero_up, Ea)
transition(S0_2 + d_zero_up, Ea)
print("\n\n")
transition(d_zero_up + S0_1, Ea)
transition(d_zero_up + S0_2, Ea)
transition(S0_1 + d_zero_up, Eb)
transition(S0_2 + d_zero_up, Eb)
print("\n\n")
transition(d_zero_up + S0_1, Eb)
transition(d_zero_up + S0_2, Eb)
T1_p_xy = [[1,0,1,1,0,0], [1,0,0,0,1,1]]
transition(AFM_down, T1_p_xy)
transition(AFM_up, T1_p_xy)
transition(FM, T1_p_xy)
T1_n_xy = [[0,1,1,1,0,0], [0,1,0,0,1,1]]
transition(AFM_down, T1_n_xy)
transition(AFM_up, T1_n_xy)
transition(FM, T1_n_xy)
T1_p_xz = [[1,1,1,0,0,0], [0,0,1,0,1,1]]
transition(AFM_up, T1_p_xz)
transition(FM, T1_p_xz)
T1_p_yz = [[1,1,0,0,1,0], [0,0,1,1,1,0]]
transition(AFM_up, T1_p_yz)
transition(FM, T1_p_yz)
T1_n_xz = [[1,1,0,1,0,0], [0,0,0,1,1,1]]
transition(AFM_up, T1_n_xz)
transition(FM, T1_n_xz)
T1_n_yz = [[1,1,0,0,0,1], [0,0,1,1,0,1]]
transition(AFM_up, T1_n_yz)
transition(FM, T1_n_yz)
S0_1 = [1, 1, 1, 0, 0, 1]
S0_2 = [1, 1, 0, 1, 1, 0]
T1_p_xz = [[1,1,1,0,0,0], [0,0,1,0,1,1]]
T1_p_yz = [[1,1,0,0,1,0], [0,0,1,1,1,0]]
transition(S0_1 + d_zero_up, T1_p_xz)
transition(S0_2 + d_zero_up, T1_p_xz)
print("\n\n")
transition(S0_1 + d_zero_up, T1_p_yz)
transition(S0_2 + d_zero_up, T1_p_yz)
transition(S0_1 + d_zero_down, T1_p_xz)
transition(S0_2 + d_zero_down, T1_p_xz)
print("\n\n")
transition(S0_1 + d_zero_down, T1_p_yz)
transition(S0_2 + d_zero_down, T1_p_yz)
transition(d_zero_up + S0_1, T1_p_xz)
transition(d_zero_up + S0_2, T1_p_xz)
print("\n\n")
transition(d_zero_up + S0_1, T1_p_yz)
transition(d_zero_up + S0_2, T1_p_yz)
print("\n\n")
print("\n\n")
transition(d_zero_down + S0_1, T1_p_xz)
transition(d_zero_down + S0_2, T1_p_xz)
print("\n\n")
transition(d_zero_down + S0_1, T1_p_yz)
transition(d_zero_down + S0_2, T1_p_yz)
transition(S0_1 + S0_1, T1_p_xz)
transition(S0_1 + S0_2, T1_p_xz)
transition(S0_2 + S0_1, T1_p_xz)
transition(S0_2 + S0_2, T1_p_xz)
print("------------------------")
transition(S0_1 + S0_1, T1_p_yz)
transition(S0_1 + S0_2, T1_p_yz)
transition(S0_2 + S0_1, T1_p_yz)
transition(S0_2 + S0_2, T1_p_yz)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To compute if an optical transition between two states is possible or not, we first get some libraries to make this easier.
Step2: The question is, whether there is a transition matrix element between the aforementioned initial and final states. We can easily anser that with a yes, since the receiving level $\beta,+$ is empty in the initial state and no spin flip is involved when moving the particle from $\alpha,+$ to $\beta,+$. Thus, the question now is how to compute it in a systematic way.
Step3: We can see that we get a change in positions $0$ and $6$ which correspond to $\alpha,+$ and $\beta,+$ in site $i$ and $j$, respectively. Now we apply modulo 2, which will allow us to check if the changes are in even or odd positions mapping even positions to $0$ whereas odd positions to $1$. Thus, if both are even or odd there will be just one unique element in the list otherwise there will be two unique elements.
Step5: Thus, in this case of chosen initial and finals states, the transition is allowed since both are even. We can wraps all of this logic in a function.
Step6: Now we have a function that tells us if between two states an optical transition is possible or not. To recapitulate, we can recompute our previous case and then with a different final state that is not allowed since it involves a spin flip, e.g., [0 0 0 0 0 1 1 0].
Step8: With this preamble, we are equiped to handle more complex cases. Given the chosen computational representation for the states, the normalization coefficients of the states are left out. Thus, one has to take care to keep track of them when constructing properly the transition matrix element in question later on.
Step9: With this we can generate states of 3, 4, and 5 electrons in a 3 level system with degeneracy 2 meaning 6 levels in total.
Step10: We can consider first the $d^4$ states and take a look at them.
Step11: It is quite a list of generated states. But from this whole list, not all states are relevant for the problem at hand. This means that we can reduce the amount of states beforehand by applying the physical constrains we have.
Step12: We obtain 4 different $d^4$ states that fullfill the conditions previously indicated. From the previous list, the first and last elements correspond to states with $S_z=\pm1$ whereas the ones in the middle correspond to the two superimposed states for the $S=0$ state, namely, a magnon. These four states, could have been easily written down by hand, but the power of this approach is evident when generating and selecting the possible states of the $d^3$ configuration.
Step13: In the case of the $d^5$ states, since our ground state has a doule occupied $d_{xy}$ orbital then it has to stay occupied.
Step15: We could generate all $d^3d^5$ combinations and check how many of them there are.
Step18: We already saw in the previous section how we can check if a transition is allowed in our list codification of the states. Here we will make it a function slightly more complex to help us deal with generating final states.
Step19: With this, we can now explore the transitions between the different initial states and final states ($^4A_2$, $^2E$, and $^2T_1$ multiplets for the $d^3$ sector). Concerning the $d^4$ states, as explained in chapter 5, there is the possibility to be in the $S_z=\pm1$ or $S_z=0$. We will cover each one of them in the following.
Step20: whereas the ones for the$|^4A_2,\pm1/2>$
Step21: Notice that the prefactors and signs are missing from this representation, and have to be taken into account when combining all the pieces into the end result.
Step22: Handling the ferromagnetic ordering first, the allowed transitions from the initial state into the $|^4A_2,3/2>$ state are
Step23: Comparing the initial and final states representations and considering the $|^4A_2,3/2>$ prefactor, we obtain that there are two possible transitions with matrix element $t_{xy,xz}$ and $t_{xy,yz}$. Each one is allowed twice from swapping the positions between $d^3$ and $d^5$.
Step24: Thus, for the $|^4A_2,\pm1/2>$ states, there is no allowed transition starting from the FM initial ground state.
Step25: We see that the AFM initial ground state has no transition matrix element for the $|^4A_2,3/2>$ state. Whereas transitions involving the $|^4A_2,\pm1/2>$ state are allowed. Once again, checking the prefactors for the multiplet and the initial ground state we get a transition matrix element of $t_{xy,xz}/\sqrt{3}$ and $t_{xy,yz}/\sqrt{3}$, twice each.
Step26: Thus, we append the $d^4_\uparrow$ representation to each part of the $d^4_0$ states. Then, checking for the transitions into the $|^4A_2,\pm3/2>$ $d^3$ state we get
Step27: Collecting the terms we get that for $|^4A_2, 3/2>$ there is no transitions into a $|d^3>|d^5>$ final state but there are transitions into two different $|d^5>|d^3>$ final states, one for each of the $|A>$ and $|B>$ parts. Thus, considering the numerical factors of the involved states, the amplitude in this case is $\frac{1}{\sqrt{2}}t_{xy,xz}$ and $\frac{1}{\sqrt{2}}t_{xy,yz}$. In this case, the states involved in $|^4A_2, -3/2>$ do not show any allowed transition.
Step28: Here, we observe the same situation than before but swapping the roles between the $|^4A_2,\pm3/2>$ states. This means that the contribution of the $d^0 d^4_\uparrow$ is the same as the $d^0 d^4_\downarrow$ one.
Step29: Following the same procedure for the $|^4A_2, 1/2>$ states and $d^4_0d^4_\uparrow$ ground state
Step30: Here we get some possible transitions to final states of interest. Here, we have to remember that the "receiving" $d3$ multiplet has three terms, which have to be added if present. For the $|d^3>|d^5>$ case there are two allowed transitions into $d^5$ states involving $t_{xy,xz}$ and $t_{xy,yz}$ for $|A>$ and $|B>$. From $|A>$ and $|B>$ we find computed terms that correspond to the same $d^5$ final state that have to be added.
Step31: there is no transition found.
Step32: Which is the same situation than before but swapping the position of the contributions as we already saw for the $|^4A_2, 3/2>$ case. For completeness we show the situation with $d^4_\downarrow$ as follows.
Step33: Continuing with the $d^4_0d^4_0$ the situation gets more complicated since $<f|\hat{t}|d^4_0>|d^4_0>$ can be split as follows $<f|\hat{t}(|A>+|B>)(|A>+|B>)$ which gives 4 terms labeled $F$ to $I$. Thus, we construct the four combinations for the initial state and calculate each one of them to later sum them up.
Step34: First dealing with the $|^4A_2,\pm 3/2>$ states for the $d^3$ sector.
Step35: No transitions from the $d^4_0d^4_0$ state to $|^4A_2,\pm3/2>$.
Step36: Here we have terms for both $|d^3>|d^5>$ and $|d^5>|d^3>$ and for each component of the initial state which can be grouped into which $d^5$ state they transition into. Terms pairs $F-H$ and $G-I$ belong together involving the $d^5_{xz\downarrow}$ and $d^5_{yz\downarrow}$ states, respectively.
Step37: For $|^4A_2,-1/2>$ states we obtain the same values than for $|^4A_2,1/2>$ but involving the other spin state.
Step38: For the $|^2E,a>$ multiplet, only transitions from the AFM ground state are possible. Collecting the prefactors we get that the transition matrix element in $-\sqrt{2/3}t_{xy,xz}$ and $-\sqrt{2/3}t_{xy,yz}$ as could be easily checked by hand.
Step39: From the $S=\pm1$ initial states, no transitions possible to Eb.
Step40: Each one of the combinations is allowed, thus considering the prefactors of the $S_0$ and $|^2E,a>$ we obtain $\sqrt{\frac{2}{3}}t_{xy,xz}$ and $\sqrt{\frac{2}{3}}t_{xy,yz}$.
Step41: Adding all the contributions of the allowed terms we obtain, that due to the - sign in the $|^2E,b>$ multiplet, the contribution is 0.
Step42: and then we add the $d^4_\uparrow$ representation to each one. Thus, for the $|^2E, Ea>$ $d^3$ multiplet we get
Step43: Here, both parts of the $S_z=0$ state contribute. Checking the prefactors for $S_z=0$ ($1/\sqrt{2}$) and $|^2E, Ea>$ ($1/\sqrt{6}$) we get a matrix element $\sqrt{\frac{2}{3}}t_{xy/xz}$.
Step44: $|^2T_1,+/->$
Step45: And for the $|^2T_1,->$
Step46: In this case, there is no possible transition to states with a singly occupied $\textit{xy}$ orbital from the $\textit{xy}$ ordered ground state.
Step47: We can see that the transitions from the ferromagnetic state are forbidden for the $xy$ orbitally ordered ground state for both $|^2T_1, xz\uparrow>$ and $|^2T_1, yz\uparrow>$ while allowing for transitions with amplitudes
Step48: S=0
Step49: First, we calculate for the $d^4_0d^4_\uparrow$ ground state. Again the $d^4_0$ state is split in two parts.
Step50: And for $d^4_0d^4_\downarrow$
Step51: Thus, for final states with singly occupied $\textit{xz}$ multiplet, we obtain transitions involving $t_{yz,xz}/2$, $t_{yz,yz}/2$, $t_{xz,xz}/2$ and $t_{xz,yz}/2$ when accounting for the prefactors of the states.
Step52: In this case, considering the prefactors of the states involved, we obtain contributions $t_{yz,xy}/{2}$ and $t_{yz,yz}/{2}$, $t_{xz,xz}/{2}$, and $t_{xz,yz}/{2}$.
|
10,337
|
<ASSISTANT_TASK:>
Python Code:
import base64
token = base64.b64decode("Njk4ZGVjMWE5Y2YyNDQ5ZmNhY2FkOWU4NDdjMDk5NWU1NTZhMDk5Yw====").decode("utf-8")
! rm -rf tp-datos-2c2020 datos
! git clone https://{token}@github.com/AlexDaciuk/tp-datos-2c2020.git
! mv tp-datos-2c2020 datos
from datos.preproc import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from matplotlib import pyplot as plt
from sklearn.tree import plot_tree
import pandas as pd
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_curve, auc, roc_auc_score, confusion_matrix
df_all = preprocessing.get_data()
df_forest = preprocessing.rforest_preproc(df_all)
# Separate test and train data
X_train, X_test, y_train, y_test = train_test_split(df_forest.drop('volveria', 1), df_forest['volveria'])
model_rfr = RandomForestClassifier(max_depth=5)
model_rfr.fit(X_train, y_train)
preprocessing.report(model_rfr, X_train, y_train, X_test, y_test)
pred_rfr = model_rfr.predict(X_test)
len(model_rfr.estimators_)
with plt.style.context("classic"):
plt.figure(figsize=(20, 10))
plot_tree(model_rfr.estimators_[0], filled=True)
pred_first_estimator = model_rfr.estimators_[0].predict(X_test)
accuracy_score(y_test, pred_first_estimator)
precision_score(y_test, pred_first_estimator)
recall_score(y_test, pred_first_estimator)
f1_score(y_test, pred_first_estimator)
pred_estimators = [
estimator.predict(X_test) for estimator in model_rfr.estimators_
]
acc_estimators = [accuracy_score(y_test, pred) for pred in pred_estimators]
plt.figure(figsize=(10, 10))
plt.hist(acc_estimators)
plt.xlabel("Accuracy", weight="bold", fontsize=15)
plt.ylabel("Frecuencia", weight="bold", fontsize=15)
plt.title(
"Histograma de accuracy de los arboles del RF model", weight="bold", fontsize=16
)
max(acc_estimators)
min(acc_estimators)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Random Forest Classifier
Step2: Cantidad de árboles entrenados
Step3: Veamos el primero de ellos
Step4: Y sus métricas
|
10,338
|
<ASSISTANT_TASK:>
Python Code:
import os
from ascat.cgls import SWI_TS
import matplotlib.pyplot as plt
%matplotlib inline
ls ../tests/ascat_test_data/cglops/swi_ts
data_path = os.path.join('..', 'tests', 'ascat_test_data', 'cglops', 'swi_ts')
rd = SWI_TS(data_path)
data = rd.read_ts(3002621)
print(data)
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
data[['SWI_001', 'SWI_010']].plot(ax=ax)
ax.set_ylabel('Soil Water Index (%)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: By default we should have the grid file from the SWI-STATIC collection and the unzipped SWI-TS products in one folder like so
Step2: Since the returned value is a pandas.DataFrame we can plot the data easily.
|
10,339
|
<ASSISTANT_TASK:>
Python Code:
initialize()
print(train())
print(train())
initialize()
for layer in [a,b,c]:
layer.weight.data = torch.ones_like(layer.weight)
print(c.weight)
train(), print(c.weight)
train(), print(c.weight)
train(), print(c.weight)
b.weight #there's a small amount of symmetry breaking, but the fan-ins are pretty similar
for _ in range(25):
train()
b.weight #now the fan-ins are pretty different
initialize()
for layer in [a,b,c]:
layer.weight.data = torch.ones_like(layer.weight)
layer.bias.data = torch.ones_like(layer.bias)
print(c.weight, c.bias)
train(), print(c.weight, c.bias)
train(), print(c.weight, c.bias)
train(), print(c.weight, c.bias)
b.weight
for _ in range(25):
train()
b.weight
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Random biases, No randomness in weights
Step2: fan-ins are almost identical across neurons, elements within one neuron's fan-in are different
Step3: after 25 epochs
Step4: No randomness in bias/weights
Step5: fan-ins are identical across neurons, elements within one neuron's fan-in are identical
Step6: after 25 epochs
|
10,340
|
<ASSISTANT_TASK:>
Python Code:
import itertools
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.model_selection import GridSearchCV, StratifiedKFold, cross_val_predict
from sklearn.feature_extraction import stop_words
from sklearn.metrics import classification_report
data_file = 'data/sms.csv'
sms = pd.read_csv(data_file, sep='\t', header=None, names=['label', 'message'])
sms.head()
class PhraseClassifier(BaseEstimator, ClassifierMixin):
'''A rule-based spam classifer, backed by a list of spam phrases.'''
def __init__(self, spam=[], lowercase=False):
'''Initialises the classifier.
Args:
spam: A list of phrases used to identify spam.
lowercase: Whether to convert messages to lowercase before predicting their class.
'''
self.spam = spam
self.lowercase = lowercase
def fit(self, X, y=None):
'''Fits the classifier.
Note: As the classifier is rule-based, this is just a dummy method to ensure
compatability with scikit-learn.
Args:
X: Unused.
y: Unused.
Returns:
The classifier object (self).
'''
return self
def predict(self, X, y=None):
'''Predicts the classes of the given messages.
Args:
X: List of messages to classify.
y: Unused.
Returns:
A list of classifications, corresponding to the given messages.
'''
results = []
for message in X:
message = message.lower() if self.lowercase else message
cls = self.spam_or_ham(message)
results.append(cls)
return results
def spam_or_ham(self, message):
'''Classifies the given message as spam or ham.
Args:
message: The message to classify.
Returns:
The predicted class of the message: 'spam' or 'ham'.
'''
# Start out assuming we have ham
result = 'ham'
# If any of the phrases in self.spam match, then mark the message as spam
for phrase in self.spam:
if phrase in message:
result = 'spam'
break
return result
def score(self, X, y=None):
'''Computes a score for the given messages and ground truth labels.
Args:
X: List of messages to classify.
y: List of the true classes of the messages.
Returns:
The average number of correct classifications by the model.
'''
y_pred = self.predict(X, y)
return sum([1 if y1 == y2 else 0 for y1, y2 in zip(y, y_pred)]) / len(y)
X = sms['message']
y = sms['label']
clf = PhraseClassifier() # clf is short for classifier
y_pred = clf.predict(X)
print(classification_report(y, y_pred))
clf = PhraseClassifier(spam=['urgent', 'win'])
y_pred = clf.predict(X)
print(classification_report(y, y_pred))
clf = PhraseClassifier(spam=['urgent', 'win'], lowercase=True)
y_pred = clf.predict(X)
print(classification_report(y, y_pred))
clf = PhraseClassifier(spam=['urgent', 'win', 'hi'], lowercase=True)
y_pred = clf.predict(X)
print(classification_report(y, y_pred))
# Create a list of the words in the training set messages that are labelled as spam
spam_messages = X[y == 'spam']
spam_words = [word.lower() for message in spam_messages for word in message.split()]
# Order the spam words by popularity
top_spam_words = pd.Series(spam_words).value_counts().index.tolist()
# Print the top ten
top_spam_words[:10]
top_spam_words = [word for word in top_spam_words if word not in stop_words.ENGLISH_STOP_WORDS]
top_spam_words[:10]
candidates = top_spam_words[:5] # Use the top five words
combinations = [combination for n in range(1, len(candidates)) \
for combination in itertools.combinations(candidates, n)]
print('Total combinations: %d' % len(combinations))
param_grid = {
'spam': combinations, # Try every combination of spam phrases
'lowercase': [True, False] # Try setting lowercase True and False
}
# Use inner CV to select the best model
inner_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0) # K = 5
clf = GridSearchCV(PhraseClassifier(), param_grid=param_grid, cv=inner_cv)
clf.fit(X, y)
# Use outer CV to evaluate the error of the best model
outer_cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0) # K = 10, doesn't have to be the same
y_pred = cross_val_predict(clf, X, y, cv=outer_cv)
print(classification_report(y, y_pred))
print('Best parameters: %s' % clf.best_params_)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, let's load the data. Write the path to your sms.csv file in the cell below
Step2: Execute the cell below to load the CSV data into a pandas data frame with the columns label and message.
Step3: Building a spam classifier
Step4: By convention, when we build a predictive model using scikit-learn, we separate our data into two variables, X and y
Step5: We can now build our first classifier. Let's create one that marks every message as ham to start with (i.e. one with no spam phrases). It won't be very useful, but it will give us a baseline accuracy from which to improve on
Step6: To measure how well our classifier works, let's create a classification report using the true labels for the SMS messages (y) and the predicted labels we've just created (y_pred)
Step7: While we haven't covered some of the terms here in class yet, they're not difficult to understand
Step8: As can be seen, the results have improved
Step9: Again, this has improved our spam detector!
Step10: It would be nice if we choose a set of spam keywords that maximized the performance of our model. We can do this using model selection, i.e. building a set of different candidate models and choosing the best one. This is easy to do with scikit-learn!
Step11: It looks like a lot of the most popular words are ones we commonly use. These aren't good indicators of spam in general, so let's remove them. scikit-learn defines a set of stop words, i.e. words that are so commonly used that they don't indicate anything in particular. Let's remove these from our set of most popular words
Step12: This looks like a better set of words to use. Let's make a list of all the possible different combinations of the most popular of these words (a brute force approach) using Python's itertools.combinations method
Step13: Now, let's define a set of parameters to build models for. The set is just a Python dictionary, where the keys match the arguments of the classifier we're using (i.e. PhraseClassifier) and the values represent different choices that can be made for a particular key
Step14: We can use the GridSearchCV class from scikit-learn to build every possible model defined by the parameters and choose the best one (i.e. to do model selection). Generally, when we use GridSearchCV, we will specify three parameters
|
10,341
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.core.display import display, HTML
%matplotlib inline
import swat
s = swat.CAS('cloud.example.com', 5570)
ulresult = s.upload('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.csv')
titanic3 = ulresult.casTable
type(titanic3)
titanic3.table.columnInfo()
# Create a computed variable.
titanic3.computedVars = ['deck'] # 1
titanic3.computedVarsProgram = \
"if cabin ne '' then deck = ksubstr(cabin,1,1); else deck = '';"
numeric=['pclass', 'survived', 'age', 'sibsp', 'parch', 'fare']
# Remove boat and body because they are proxies for survived.
# Remove ticket and cabin. Use the computed column, deck, instead.
char = ['sex', 'deck', 'embarked', 'home.dest']
all = numeric + char
# The numeric variable was defined earlier.
results = titanic3[numeric].groupby("survived").simple.summary()
resultColumns = ['Column', 'Min', 'Max', 'N', 'NMiss', 'Mean', 'Sum', 'Std', 'StdErr'];
display(HTML('<h3>Perished</h3>'))
display(results['ByGroup1.Summary'][resultColumns]) # 1
display(HTML('<h3>Survived</h3>'))
display(results['ByGroup2.Summary'][resultColumns])
s.builtins.loadActionSet('sampling')
# The sampling.stratified action does not accept the vars parameter.
# Instead, copyVars is used to select the columns to copy to the output table.
if 'vars' in titanic3.params:
del titanic3.vars
# Temporarily set a groupBy parameter.
with titanic3:
titanic3.groupBy={'survived'}
titanic3.sampling.stratified(
partInd=True, # 1
samppct=40, # 2
seed=1234,
output={
'casout':{'name':'titanic3part', 'replace':True},
'copyVars':all
}
)
titanic3.table.dropTable() # 3
titanic3part = s.CASTable('titanic3part') # 4
ci = titanic3part.columnInfo()
display(ci)
survSummary = titanic3part['survived'].groupby('_partind_').simple.summary()
resultColumns = ['Column', 'N', 'NMiss', 'Mean', 'Sum', 'Std', 'StdErr']
display(survSummary['ByGroupInfo'])
display(survSummary['ByGroup1.Summary'][resultColumns])
display(survSummary['ByGroup2.Summary'][resultColumns])
s.builtins.loadActionSet('decisionTree') # 1
training = titanic3part.query('0 = _partind_') # 2
trainingResults = training.forestTrain(
target='survived',
inputs=all,
nominals=char + ['pclass', 'survived'],
casOut={'name':'forestModel', 'replace':True},
seed=1234,
binOrder=True,
varImp=True
)
display(trainingResults)
forestModel = s.CASTable('forestModel')
titanic3part.forestScore(
modelTable=forestModel,
copyVars=['survived', '_partind_'],
casOut={'name':'forestScored', 'replace':True},
vote='prob'
)
s.builtins.loadActionSet('percentile')
forestScored = s.CASTable('forestScored') # 1
forestScored.groupBy='_PartInd_' # 2
forestScored.computedVars=['P1', 'P0'] # 3
forestScored.computedVarsProgram='''
if '1' eq strip(_RF_PredName_) then do;
P1 = _RF_PredP_;
P0 = 1 - _RF_PredP_;
end;
else do;
P1 = 1 - _RF_PredP_;
P0 = _RF_PredP_;
end;
''' # 4
forestScored.percentile.assess(
casOut={'name':'forestAssess', 'replace':True},
nbins=10,
cutStep = 0.01,
inputs=['P1'],
response='survived',
event='1',
pVar=['P0'],
pEvent='0'
)
forestAssess_ROC = \
s.CASTable('forestAssess_ROC', where='1 = _partind_') # 1
out2 = forestAssess_ROC.to_frame()
plt.figure(figsize=(8,8))
plt.plot(out2._FPR_,out2._Sensitivity_,'bo-',linewidth=2)
plt.plot(pd.Series(range(0,11,1))/10,pd.Series(range(0,11,1))/10,'k--',linewidth=1)
plt.xlabel('False Positive Rate')
plt.ylabel('Correct Classification Rate')
plt.grid(True)
plt.title('ROC Curve')
plt.show()
forestAssess = \
s.CASTable('forestAssess', where='1 = _partind_') # 1
lift = forestAssess.to_frame()
plt.figure(figsize=(8,8))
plt.plot(lift._Depth_, lift._Lift_,'bo-',linewidth=2)
plt.xlabel('Percentile')
plt.ylabel('Lift')
plt.grid(True)
plt.title('Lift Chart')
plt.show()
s.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the Data
Step2: Add a Computed Column
Step3: Group By Analysis
Step4: Sample the Data
Step5: Check that Sampling is Even
Step6: Train a Model
Step7: Use the Model for Scoring
Step8: Assess the Model
Step9: Plot ROC
Step10: Plot Lift
|
10,342
|
<ASSISTANT_TASK:>
Python Code:
import xgboost
import shap
# load JS visualization code to notebook
shap.initjs()
# train XGBoost model
X,y = shap.datasets.boston()
bst = xgboost.train({"learning_rate": 0.01}, xgboost.DMatrix(X, label=y), 100)
# explain the model's predictions using SHAP values
explainer = shap.TreeExplainer(bst)
shap_values = explainer.shap_values(X)
# visualize the first prediction's explaination with default colors
shap.force_plot(explainer.expected_value, shap_values[0,:], X.iloc[0,:])
# visualize the first prediction's explaination with a Pink and Yellow Green color map.
shap.force_plot(explainer.expected_value, shap_values[0,:], X.iloc[0,:], plot_cmap="PkYg")
# visualize the first 5 predictions explanations with a dark red dark blue color map.
shap.force_plot(explainer.expected_value, shap_values[0:5,:], X.iloc[0:5,:], plot_cmap="DrDb")
# visualize the first 5 predictions explanations with a custom color map.
shap.force_plot(explainer.expected_value, shap_values[0:5,:], X.iloc[0:5,:], plot_cmap=["#FF5733","#335BFF"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The plot color maps are named based on the color maps on matplotlib.
Step2: The same can be applied to lists of explanations
Step3: The different color map names are available in the color-set.js file. To add one, modify the file with a color map name, and a list containing the two colors of the color map, the first one being the one for positive SHAP values, and the second one for the negative SHAP values. You can also pass a list of colors as the value for the plot_cmap parameter to render the force plot with those colors. Those colors must be hex-coded.
|
10,343
|
<ASSISTANT_TASK:>
Python Code:
import json
import numpy as np
import pandas as pd
from jupyter_scisheets_widget import scisheets_widget
import pandas_datareader as pdr
ibm_data = pdr.get_data_yahoo('IBM')
income_data = pd.read_csv('income_data.csv', sep=';')
income_data
income_data['test_col'] = income_data['2006']
import ast
df = income_data
model_data = df.to_json(orient='split')
model_data = ast.literal_eval(model_data)
model_data_2 = json.dumps(model_data)
tbl._model_data = json.dumps(model_data)
model_data_2
tbl._model_data
%%javascript
var json_model = this.model.get('_model_data')
console.log(json_model)
tbl = scisheets_widget.SciSheetTable()
tbl.load_df(income_data)
tbl
income_data
bob = income_data.copy()
bob['House Party 2'] = 5
income_data
bob
from IPython.display import display
display(tbl)
tbl2._widget._model_data
tbl2 = scisheets_widget.HandsonDataFrame(income_data)
tbl2.show()
scisheets_widget.HandsonDataFrame(income_data)._widget._model_data
import ast
data_dic = {}
data_dic['columns'] = ast.literal_eval(tbl._model_header)
data_dic['index'] = ast.literal_eval(tbl._model_row_header)
data_dic['data'] = ast.literal_eval(tbl._model_data)
pd.read_json(json.dumps(data_dic), orient='split')
income_data.to_json(orient='split')
data_dic
cols = {'columns': tbl._model_header.encode('utf-8')}
ind = {'index': tbl._model_row_header.encode('utf-8')}
data = {'data': tbl._model_data.encode('utf-8')}
z=cols
z.update(ind)
z.update(data)
z
json.dumps(z)
pd.read_json(json.dumps(z))
model_data = df.to_json(orient='split')
model_data = ast.literal_eval(model_data)
self._model_data = json.dumps(model_data['data'])
self._model_header = json.dumps(model_data['columns'])
self._model_row_header = json.dumps(model_data['index'])
tbl1._widget._model_data
tbl1 = scisheets_widget.HandsonDataFrame(income_data)
tbl1.show()
import ipywidgets as widgets
print(widgets.Widget.unobserve.__doc__)
tbl1._on_displayed()
tbl1._widget._model_header
tbl_ibm = scisheets_widget.SciSheetTable()
tbl_ibm.load_df(ibm_data)
tbl_ibm
tbl = scisheets_widget.SciSheetTable()
tbl.load_df(income_data[['GEOID', 'State']])
tbl
%%html
<div id="example1" class="hot handsontable"></div>
<script src="https://docs.handsontable.com/0.34.4/bower_components/handsontable/dist/handsontable.full.js"></script>
<link type="text/css" rel="stylesheet" href="https://docs.handsontable.com/0.34.4/bower_components/handsontable/dist/handsontable.full.min.css">
%%javascript
document.addEventListener("DOMContentLoaded", function() {
var
data1 = [
['', 'Tesla', 'Nissan', 'Toyota', 'Honda', 'Mazda', 'Ford'],
['2017', 10, 11, 12, 13, 15, 16],
['2018', 10, 11, 12, 13, 15, 16],
['2019', 10, 11, 12, 13, 15, 16],
['2020', 10, 11, 12, 13, 15, 16],
['2021', 10, 11, 12, 13, 15, 16]
],
container1 = document.getElementById('example1'),
settings1 = {
data: data1
},
hot1;
container1.append('allice')
hot1 = new Handsontable(container1, settings1);
data1[0][1] = 'Ford'; // change "Kia" to "Ford" programmatically
hot1.render();
});
%%javascript
this.element.append('steve')
data = [{"name": "Lisp", "value": 1}, {"name": "Scala", "value": 2}, {"name": "Perl", "value": 4}, {"name": "Java", "value": 5}, {"name": "C++", "value": 8}, {"name": "Python", "value": 10}]
tbl = scisheets_widget.SciSheetTable()
tbl.load_list(income_data)
tbl
income_data.to_json(orient='index')
income_data.to_json(orient='values')
income_data.to_json(orient='records')
income_data.to_json(orient='columns')
income_data.to_json(orient='split')
bob = income_data.to_json(orient='split')
import ast
bob2 = ast.literal_eval(bob)
for key, value in bob2.items():
print("Key:")
print(key)
bob2['columns']
json.dumps(bob2['columns'])
bob2['columns']
'''Illustrates how to detect an assignment for a DataFrame.'''
import pandas as pd
class VirtualDataFrame(object):
def __init__(self, df):
self._df = df.copy()
def _value_changed_(self):
if self._df.__setitem__:
print "Setting %s to %s" % (str(key), str(value))
def __setitem__(self, key, value):
print "Setting %s to %s" % (str(key), str(value))
class VirtualDataFrame2(object):
def __init__(self, df):
self._df = df
# def _value_changed_(self):
# if self._df.__setitem__:
# print "Setting %s to %s" % (str(key), str(value))
# def __setitem__(self, key, value):
# print "Setting %s to %s" % (str(key), str(value))
bob2 = VirtualDataFrame2(income_data)
bob2._df.loc[3,'State'] = 'Louisiana'
income_data
bob = VirtualDataFrame(income_data)
bob._df.loc[2,'State'] = 'Alex'
income_data
class with_called_attribute:
def __init__(self, func):
self.func = func
self.called = False
def __call__(self, *args, **kwargs):
self.called = True
self.func(*args, **kwargs)
@with_called_attribute
def run_some_thing(*args, **kwargs):
print("You called with:", *args, **kwargs)
run_some_thing.called
#>>> False
run_some_thing(10)
#>>> You called with: 10
run_some_thing.called
#>>> True
class Detector(object):
def __init__(self, df):
self.loadTableIsCalledForObjA = False
self._df = df
def __setitem__(self):
if type(self._df) is pd.DataFrame:
print('True')
bob = Detector(income_data)
bob._df.loc[1, 'State'] =
loadTable(income_data)
detector.loadTableIsCalledForObjA
bob._df.loc[1, 'State'] = 'Barber'
bob['Alice'] = bob['State']
income_data
income_data
import pandas as pd
import numpy as np
class MyDF(pd.DataFrame):
@property
def _constructor(self):
return MyDF
mydf = MyDF(np.random.randn(3,4), columns=['A','B','C','D'])
print type(mydf)
mydf_sub = mydf[['A','C']]
print type(mydf_sub)
import pandas as pd
import numpy as np
class MyDF(pd.DataFrame):
_attributes_ = "myattr1,myattr2"
def __init__(self, *args, **kw):
super(MyDF, self).__init__(*args, **kw)
if len(args) == 1 and isinstance(args[0], MyDF):
args[0]._copy_attrs(self)
def _copy_attrs(self, df):
for attr in self._attributes_.split(","):
df.__dict__[attr] = getattr(self, attr, None)
@property
def _constructor(self):
def f(*args, **kw):
df = MyDF(*args, **kw)
self._copy_attrs(df)
return df
return f
mydf = MyDF(np.random.randn(3,4), columns=['A','B','C','D'])
print type(mydf)
mydf_sub = mydf[['A','C']]
print type(mydf_sub)
mydf.myattr1 = 1
mydf_cp1 = MyDF(mydf)
mydf_cp2 = mydf.copy()
print mydf_cp1.myattr1, mydf_cp2.myattr1
mydf_cp1
mydf_cp2
def write(self):
return dumps(
self.obj,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=self.date_format == 'iso',
default_handler=self.default_handler
)
import json
json.dumps(income_data)
import pandas._libs.json as json
dumps = json.dumps
dumps(income_data)
import pandas._libs.
print('help')
print 'help'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data into the notebook
Step2: BUG currently seems to be coming from handonstable rendering
Step3: Testing different ways to convert df to json
|
10,344
|
<ASSISTANT_TASK:>
Python Code:
from pomegranate import *
%pylab inline
import numpy as np
data = np.concatenate( (np.random.randn(250, 1) * 2.75 + 1.25, np.random.randn(500, 1) * 1.2 + 7.85) )
np.random.shuffle(data)
plt.hist( data, edgecolor='c', color='c', bins=20 )
d = GeneralMixtureModel( [NormalDistribution(2.5, 1), NormalDistribution(8, 1)] )
labels = d.predict( data )
print labels[:5]
print "{} 1 labels, {} 0 labels".format( labels.sum(), labels.shape[0] - labels.sum() )
plt.hist( data[ labels == 0 ], edgecolor='r', color='r', bins=20 )
plt.hist( data[ labels == 1 ], edgecolor='c', color='c', bins=20 )
labels = d.predict_proba( data )
print labels[:5]
print labels.sum(axis=0)
d.fit( data, verbose=True )
mu = np.arange(5)
cov = np.eye(5)
mgs = [ MultivariateGaussianDistribution( mu*i, cov ) for i in range(5) ]
gmm = GeneralMixtureModel( mgs )
data = numpy.random.randn(1000, 5) * 5
for i in range(5):
data[i::5] += np.arange(5)*i
for i in range(10):
print "Point {}: logp {}".format( i, gmm.log_probability(data[i]) )
gmm.fit(data, verbose=True, stop_threshold=1)
for i in range(10):
print "Point {}: logp {}".format( i, gmm.log_probability(data[i]) )
d1 = DiscreteDistribution( {'A' : 0.25, 'C': 0.25, 'G' : 0.25, 'T': 0.25 } ) # Background
d2 = DiscreteDistribution( {'A' : 0.05, 'C': 0.45, 'G' : 0.45, 'T': 0.05 } ) # CG rich regions
gmm = GeneralMixtureModel( [d1, d2] )
seq = numpy.array(list('CGACATCTGACTACGGCGCGCCTACTACTTGATCGATACGGCGTCAGCGACGACGATGATCGGCATCAGTCACTAC'))
gmm.fit(seq)
print gmm.distributions
print
print numpy.exp(gmm.weights)
print gmm.to_json()
gmm_2 = GeneralMixtureModel.from_json( gmm.to_json() )
gmm_2.distributions
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can create our initial estimate of what this distribution is a General Mixture Model. This is a model which is comprised of multiple distributions, and weights on those distributions representing the prior probability of a point falling under that distribution given no knowledge of the point itself (defaults to equal). We can have univariate mixture models by using univariate distributions, or multivariate distributions by using multivariate distributions.
Step2: We can now predict the class labels of each point under this mixture.
Step3: This is fairly close to the number of underlying points from each distribution, off by 17 in each label. We still don't know if the labels are accurate, just the number of labels.
Step4: It is slightly more difficult to update the underlying components of the model because we don't have labels indicating which point came from which distribution. We could try to use the labels inferred from the model. It seems to cleanly split it, but what if our initial estimate was not very good? It could be difficult to get a good update if we had a bad prior.
Step5: This is slightly closer to the truth, with 15.2 off instead of 17, around 10% closer.
Step6: Multivariate Gaussian Mixture Models
Step7: Lets see how well some points fit under the mixture model.
Step8: Now lets see how well the previous points fit.
Step9: Looks like they're being fit significantly better than before! Training works.
Step10: Looks like in this case the concept was sound, that there many CG rich regions, but our initial estimates of the percentages were off. We can use a GMM like the one above to both identify and study the composition of these regions at the same time, updating the parameters of the distributions using expectation-maximization. We will go into a more complex way of dong this using HMMs in the next tutorial.
Step11: Not the prettiest thing to look at right now. However, we can easily load it and use it immediately.
|
10,345
|
<ASSISTANT_TASK:>
Python Code:
input_form =
<table>
<td style="border-style: none;">
<div style="border: solid 2px #666; width: 143px; height: 144px;">
<canvas width="140" height="140"></canvas>
</div></td>
<td style="border-style: none;">
<button onclick="clear_value()">Clear</button>
</td>
</table>
javascript = '''
<script type="text/Javascript">
var pixels = [];
for (var i = 0; i < 28*28; i++) pixels[i] = 0;
var click = 0;
var canvas = document.querySelector("canvas");
canvas.addEventListener("mousemove", function(e){
if (e.buttons == 1) {
click = 1;
canvas.getContext("2d").fillStyle = "rgb(0,0,0)";
canvas.getContext("2d").fillRect(e.offsetX, e.offsetY, 8, 8);
x = Math.floor(e.offsetY * 0.2);
y = Math.floor(e.offsetX * 0.2) + 1;
for (var dy = 0; dy < 2; dy++){
for (var dx = 0; dx < 2; dx++){
if ((x + dx < 28) && (y + dy < 28)){
pixels[(y+dy)+(x+dx)*28] = 1;
}
}
}
} else {
if (click == 1) set_value();
click = 0;
}
});
function set_value(){
var result = ""
for (var i = 0; i < 28*28; i++) result += pixels[i] + ","
var kernel = IPython.notebook.kernel;
kernel.execute("image = [" + result + "]");
kernel.execute("f = open('request.json', 'w')");
kernel.execute("f.write('{\\"inputs\\":%s}' % image)");
kernel.execute("f.close()");
}
function clear_value(){
canvas.getContext("2d").fillStyle = "rgb(255,255,255)";
canvas.getContext("2d").fillRect(0, 0, 140, 140);
for (var i = 0; i < 28*28; i++) pixels[i] = 0;
}
</script>
'''
from IPython.display import HTML
HTML(input_form + javascript)
%%bash
MODEL_NAME="MNIST"
gcloud ai-platform predict --model ${MODEL_NAME} --json-instances request.json | tee predict.txt
import matplotlib.pyplot as plt
import re
with open('predict.txt') as prediction:
head = prediction.readline()
result = prediction.readline()
r = re.compile("\[(.+)\]")
pred = r.search(result).group(1).split(',')
pred = map(lambda x: float(x), pred)
fig = plt.figure(figsize=(4,2))
subplot = fig.add_subplot(1,1,1)
subplot.set_xticks(range(10))
subplot.set_xlim(-0.5,9.5)
subplot.set_ylim(0,1)
subplot.bar(range(10), pred, align='center')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This creates a canvas where you can draw a handwritten digit.
Step2: Now you can send a request to make a prediction.
Step3: You can also draw a bar chart with the prediction result.
|
10,346
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.3,<2.4"
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b.set_value('sma@binary', 20)
b.set_value('q', 0.8)
b.set_value('ecc', 0.8)
b.set_value('per0', 45)
b.get_parameter('t0_supconj', context='component')
b.get_parameter('t0_perpass', context='component')
b.get_parameter('t0_perpass', context='constraint')
b.get_parameter('t0_ref', context='component')
b.get_parameter('t0_ref', context='constraint')
b.get_parameter('t0', context='system')
b.add_dataset('orb', compute_times=np.linspace(-1,1,1001))
b.run_compute(ltte=False)
afig, mplfig = b.plot(x='us', y='ws', z=0, time='t0_supconj', show=True)
afig, mplfig = b.plot(x='us', y='ws', z=0, time='t0_perpass', show=True)
afig, mplfig = b.plot(x='us', y='ws', z=0, time='t0_ref', show=True)
b.to_phase(0.0)
b.to_phase(0.0, component='binary', t0='t0_supconj')
b.to_phase(0.0, component='binary', t0='t0_perpass')
b.to_phase(0.0, component='binary', t0='t0_ref')
b.add_dataset('lc', times=np.linspace(0,1,51), ld_mode='manual', ld_func='linear', ld_coeffs=[0.0])
b.run_compute(ltte=False, irrad_method='none', atm='blackbody')
afig, mplfig = b['lc01@model'].plot(x='phases', t0='t0_supconj', xlim=(-0.3,0.3), show=True)
afig, mplfig = b['lc01@model'].plot(x='phases', t0='t0_perpass', xlim=(-0.3,0.3), show=True)
afig, mplfig = b['lc01@model'].plot(x='phases', t0='t0_ref', xlim=(-0.3,0.3), show=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle.
Step2: And let's make our system a little more interesting so that we can discriminate between the various t0s
Step3: t0 Parameters
Step4: t0_perpass defines the time at which both components in our orbit is at periastron passage. By default this parameter is constrained by t0_supconj. For more details or information on how to change which parameter is editable, see the Constraints Tutorial.
Step5: The t0_ref defines the time at which the primary component in our orbit passes an arbitrary reference point. This t0_ref is defined in the same way as PHOEBE legacy's 'HJD0' parameter, so is included for convenience translating between the two.
Step6: In addition, there is a single t0 parameter that is system-wide. This parameter simply defines the time at which all parameters are defined and therefore at which all computations start. The value of this parameter begins to play an important role if any parameter is given a time-derivative (see apsidal motion and dpdt, for example) or when using N-body instead of Keplerian dynamics (coming in a future release).
Step7: Influence on Oribits (positions)
Step8: To visualize where these times are with respect to the orbits, we can plot the model orbit and highlight the positions of each star at the times defined by these parameters. Note here that the observer is in the positive w-direction.
Step9: Influence on Phasing
Step10: Similarly, if plotting phases on any axis, passing the 't0' keyword will set the zero-phase accordingly. To see this, let's compute a light curve and phase it with the various t0s shown in the orbits above.
|
10,347
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
import numpy as np
try:
import pandas as pd
except:
pass
import flopy
m = flopy.modflow.Modflow('mnw2example', model_ws='temp')
dis = flopy.modflow.ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m)
node_data = pd.DataFrame([[1, 1, 9.5, 7.1, 'well1', 'skin', -1, 0, 0, 0, 1., 2., 5., 6.2],
[1, 1, 7.1, 5.1, 'well1', 'skin', -1, 0, 0, 0, 0.5, 2., 5., 6.2],
[3, 3, 9.1, 3.7, 'well2', 'skin', -1, 0, 0, 0, 1., 2., 5., 4.1]],
columns=['i', 'j', 'ztop', 'zbotm', 'wellid', 'losstype', 'pumploc', 'qlimit', 'ppflag', 'pumpcap',
'rw', 'rskin', 'kskin', 'zpump'])
node_data
node_data = node_data.to_records()
node_data
stress_period_data = pd.DataFrame([[0, 'well1', 0],
[1, 'well1', 100.0],
[0, 'well2', 0],
[1, 'well2', 1000.]], columns=['per', 'wellid', 'qdes'])
stress_period_data
pers = stress_period_data.groupby('per')
stress_period_data = {i: pers.get_group(i).to_records() for i in [0, 1]}
stress_period_data
mnw2 = flopy.modflow.ModflowMnw2(model=m, mnwmax=2,
node_data=node_data,
stress_period_data=stress_period_data,
itmp=[2, 2, -1], # reuse second per pumping for last stress period
)
# "nodtot" is computed automatically
mnw2.nodtot
pd.DataFrame(mnw2.node_data)
pd.DataFrame(mnw2.stress_period_data[0])
pd.DataFrame(mnw2.stress_period_data[1])
tmp = flopy.modflow.ModflowMnw2(model=m,
itmp=[1, 1, -1], # reuse second per pumping for last stress period
)
node_data = tmp.get_empty_node_data(3)
node_data
mnw2.mnw
mnw2.mnw['well1'].__dict__
pd.DataFrame(mnw2.mnw['well1'].node_data)
pd.DataFrame(mnw2.mnw['well2'].stress_period_data)
mnw2fromobj = flopy.modflow.ModflowMnw2(model=m, mnwmax=2,
mnw=mnw2.mnw,
itmp=[2, 2, -1], # reuse second per pumping for last stress period
)
pd.DataFrame(mnw2fromobj.node_data)
pd.DataFrame(mnw2fromobj.stress_period_data[0])
pd.DataFrame(mnw2fromobj.stress_period_data[1])
per1 = flopy.modflow.ModflowMnw2.get_empty_stress_period_data(itmp=2)
per1
mnw2.write_file(os.path.join('temp/test.mnw2'))
junk = [print(l.strip('\n')) for l in open('temp/test.mnw2').readlines()]
path = os.path.join('..', '..', 'examples', 'data', 'mf2005_test')
cpth = os.path.join('..', '..', 'autotest', 'temp')
m = flopy.modflow.Modflow('MNW2-Fig28', model_ws=cpth)
dis = flopy.modflow.ModflowDis.load(path + '/MNW2-Fig28.dis', m)
m.get_package_list()
mnw2pth = os.path.join(path, 'MNW2-Fig28.mnw2')
mnw2 = flopy.modflow.ModflowMnw2.load(mnw2pth, m)
pd.DataFrame(mnw2.node_data)
pd.DataFrame(mnw2.stress_period_data[0])
mnw2.mnw
pd.DataFrame(mnw2.mnw['Well-A'].stress_period_data)
path = os.path.join('..', '..', 'examples', 'data', 'mnw2_examples')
cpth = os.path.join('temp')
m = flopy.modflow.Modflow('br', model_ws=cpth)
mnw2 = flopy.modflow.ModflowMnw2.load(path + '/BadRiver_cal.mnw2', m)
df = pd.DataFrame(mnw2.node_data)
df.loc[:, df.sum(axis=0) != 0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make an MNW2 package from scratch
Step2: MNW2 information by node
Step3: convert the DataFrame to a rec array for compatibility with flopy
Step4: Stress period information
Step5: Make ModflowMnw2 package object
Step6: empty node_data and stress_period_data tables can also be generated by the package class, and then filled
Step7: Mnw objects
Step8: Note that Mnw object attributes for variables that vary by node are lists (e.g. rw above)
Step9: Instead of a dict keyed by stress period, Mnw.stress_period_data is a recarray with pumping data listed by stress period for that well
Step10: Build the same package using only the Mnw objects
Step11: By default, the node_data and stress_period_data tables attached to the ModflowMnw2 package class are definitive
Step12: Write an MNW2 package file and inspect the results
Step13: Load some example MNW2 packages
|
10,348
|
<ASSISTANT_TASK:>
Python Code:
import gzip
import cPickle as pickle
with gzip.open("../data/train.pklz", "rb") as train_file:
train_set = pickle.load(train_file)
with gzip.open("../data/test.pklz", "rb") as test_file:
test_set = pickle.load(test_file)
with gzip.open("../data/questions.pklz", "rb") as questions_file:
questions = pickle.load(questions_file)
print "train_set: ", len(train_set)
print "test_set: ", len(test_set)
print "questions: ", len(questions)
print sorted(train_set.keys())[:10]
print train_set[1]
print train_set[1].keys()
print sorted(test_set.keys())[:10]
print test_set[7]
print test_set[7].keys()
print sorted(questions.keys())[:10]
print questions[1]
print questions[1].keys()
X_train = []
Y_train = []
for key in train_set:
# We only care about positive case at this time
if train_set[key]['position'] < 0:
continue
uid = train_set[key]['uid']
qid = train_set[key]['qid']
pos = train_set[key]['position']
q_length = max(questions[qid]['pos_token'].keys())
feat = [uid, qid, q_length]
X_train.append(feat)
Y_train.append([pos])
print len(X_train)
print len(Y_train)
print X_train[0], Y_train[0]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train, Y_train)
X_test = []
test_id = []
for key in test_set:
test_id.append(key)
uid = test_set[key]['uid']
qid = test_set[key]['qid']
q_length = max(questions[qid]['pos_token'].keys())
feat = [uid, qid, q_length]
X_test.append(feat)
predictions = model.predict(X_test)
predictions = sorted([[id, predictions[index][0]] for index, id in enumerate(test_id)])
print len(predictions)
predictions[:5]
import csv
predictions.insert(0,["id", "position"])
with open('guess.csv', 'wb') as fp:
writer = csv.writer(fp, delimiter=',')
writer.writerows(predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's take a look the loaded data set.
Step2: Make training set
Step3: It means that user 0 tried to solve question number 1 which has 77 tokens for question and he or she answered at 61st token.
Step4: Let's make test set for testing.
Step5: Here is 4749 predictions.
|
10,349
|
<ASSISTANT_TASK:>
Python Code:
# Set `PATH` to include the directory containing TFX CLI and skaffold.
PATH=%env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
!grep 'BASE_IMAGE =' -A 5 pipeline/covertype_training_pipeline.py
%%writefile ./pipeline/covertype_training_pipeline.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KFP pipeline orchestrating BigQuery and Cloud AI Platform services.
import os
from helper_components import evaluate_model
from helper_components import retrieve_best_run
from jinja2 import Template
import kfp
from kfp.components import func_to_container_op
from kfp.dsl.types import Dict
from kfp.dsl.types import GCPProjectID
from kfp.dsl.types import GCPRegion
from kfp.dsl.types import GCSPath
from kfp.dsl.types import String
from kfp.gcp import use_gcp_secret
# Defaults and environment settings
BASE_IMAGE = os.getenv('BASE_IMAGE')
TRAINER_IMAGE = os.getenv('TRAINER_IMAGE')
RUNTIME_VERSION = os.getenv('RUNTIME_VERSION')
PYTHON_VERSION = os.getenv('PYTHON_VERSION')
COMPONENT_URL_SEARCH_PREFIX = os.getenv('COMPONENT_URL_SEARCH_PREFIX')
USE_KFP_SA = os.getenv('USE_KFP_SA')
TRAINING_FILE_PATH = 'datasets/training/data.csv'
VALIDATION_FILE_PATH = 'datasets/validation/data.csv'
TESTING_FILE_PATH = 'datasets/testing/data.csv'
# Parameter defaults
SPLITS_DATASET_ID = 'splits'
HYPERTUNE_SETTINGS =
{
"hyperparameters": {
"goal": "MAXIMIZE",
"maxTrials": 6,
"maxParallelTrials": 3,
"hyperparameterMetricTag": "accuracy",
"enableTrialEarlyStopping": True,
"params": [
{
"parameterName": "max_iter",
"type": "DISCRETE",
"discreteValues": [500, 1000]
},
{
"parameterName": "alpha",
"type": "DOUBLE",
"minValue": 0.0001,
"maxValue": 0.001,
"scaleType": "UNIT_LINEAR_SCALE"
}
]
}
}
# Helper functions
def generate_sampling_query(source_table_name, num_lots, lots):
Prepares the data sampling query.
sampling_query_template =
SELECT *
FROM
`{{ source_table }}` AS cover
WHERE
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {{ num_lots }}) IN ({{ lots }})
query = Template(sampling_query_template).render(
source_table=source_table_name, num_lots=num_lots, lots=str(lots)[1:-1])
return query
# Create component factories
component_store = kfp.components.ComponentStore(
local_search_paths=None, url_search_prefixes=[COMPONENT_URL_SEARCH_PREFIX])
bigquery_query_op = component_store.load_component('bigquery/query')
mlengine_train_op = component_store.load_component('ml_engine/train')
mlengine_deploy_op = component_store.load_component('ml_engine/deploy')
retrieve_best_run_op = func_to_container_op(
retrieve_best_run, base_image=BASE_IMAGE)
evaluate_model_op = func_to_container_op(evaluate_model, base_image=BASE_IMAGE)
@kfp.dsl.pipeline(
name='Covertype Classifier Training',
description='The pipeline training and deploying the Covertype classifierpipeline_yaml'
)
def covertype_train(project_id,
region,
source_table_name,
gcs_root,
dataset_id,
evaluation_metric_name,
evaluation_metric_threshold,
model_id,
version_id,
replace_existing_version,
hypertune_settings=HYPERTUNE_SETTINGS,
dataset_location='US'):
Orchestrates training and deployment of an sklearn model.
# Create the training split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4])
training_file_path = '{}/{}'.format(gcs_root, TRAINING_FILE_PATH)
create_training_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=training_file_path,
dataset_location=dataset_location)
# Create the validation split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[8])
validation_file_path = '{}/{}'.format(gcs_root, VALIDATION_FILE_PATH)
create_validation_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=validation_file_path,
dataset_location=dataset_location)
# Create the testing split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[9])
testing_file_path = '{}/{}'.format(gcs_root, TESTING_FILE_PATH)
create_testing_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=testing_file_path,
dataset_location=dataset_location)
# Tune hyperparameters
tune_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--hptune', 'True'
]
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir/hypertune',
kfp.dsl.RUN_ID_PLACEHOLDER)
hypertune = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=tune_args,
training_input=hypertune_settings)
# Retrieve the best trial
get_best_trial = retrieve_best_run_op(
project_id, hypertune.outputs['job_id'])
# Train the model on a combined training and validation datasets
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir', kfp.dsl.RUN_ID_PLACEHOLDER)
train_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--alpha',
get_best_trial.outputs['alpha'], '--max_iter',
get_best_trial.outputs['max_iter'], '--hptune', 'False'
]
train_model = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=train_args)
# Evaluate the model on the testing split
eval_model = evaluate_model_op(
dataset_path=str(create_testing_split.outputs['output_gcs_path']),
model_path=str(train_model.outputs['job_dir']),
metric_name=evaluation_metric_name)
# Deploy the model if the primary metric is better than threshold
with kfp.dsl.Condition(eval_model.outputs['metric_value'] > evaluation_metric_threshold):
deploy_model = mlengine_deploy_op(
model_uri=train_model.outputs['job_dir'],
project_id=project_id,
model_id=model_id,
version_id=version_id,
runtime_version=RUNTIME_VERSION,
python_version=PYTHON_VERSION,
replace_existing_version=replace_existing_version)
# Configure the pipeline to run using the service account defined
# in the user-gcp-sa k8s secret
if USE_KFP_SA == 'True':
kfp.dsl.get_pipeline_conf().add_op_transformer(
use_gcp_secret('user-gcp-sa'))
!cat base_image/Dockerfile
!cat trainer_image/Dockerfile
REGION = 'us-central1'
ENDPOINT = '2154b2f21283d4d2-dot-europe-west1.pipelines.googleusercontent.com'
ARTIFACT_STORE_URI = 'gs://qwiklabs-gcp-01-3aff5ef1f764-kubeflowpipelines-default'
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
IMAGE_NAME='trainer_image'
TAG='latest'
TRAINER_IMAGE='gcr.io/{}/{}:{}'.format(PROJECT_ID, IMAGE_NAME, TAG)
!gcloud builds submit --timeout 15m --tag $TRAINER_IMAGE trainer_image
IMAGE_NAME='base_image'
TAG='latest'
BASE_IMAGE='gcr.io/{}/{}:{}'.format(PROJECT_ID, IMAGE_NAME, TAG)
!gcloud builds submit --timeout 15m --tag $BASE_IMAGE base_image
USE_KFP_SA = False
COMPONENT_URL_SEARCH_PREFIX = 'https://raw.githubusercontent.com/kubeflow/pipelines/0.2.5/components/gcp/'
RUNTIME_VERSION = '1.15'
PYTHON_VERSION = '3.7'
%env USE_KFP_SA={USE_KFP_SA}
%env BASE_IMAGE={BASE_IMAGE}
%env TRAINER_IMAGE={TRAINER_IMAGE}
%env COMPONENT_URL_SEARCH_PREFIX={COMPONENT_URL_SEARCH_PREFIX}
%env RUNTIME_VERSION={RUNTIME_VERSION}
%env PYTHON_VERSION={PYTHON_VERSION}
!dsl-compile --py pipeline/covertype_training_pipeline.py --output covertype_training_pipeline.yaml
!head covertype_training_pipeline.yaml
PIPELINE_NAME='covertype_continuous_training'
!kfp --endpoint $ENDPOINT pipeline upload \
-p $PIPELINE_NAME \
covertype_training_pipeline.yaml
!kfp --endpoint $ENDPOINT pipeline list
PIPELINE_ID='defa3c60-637b-4332-88b9-d8647c2aec84'
EXPERIMENT_NAME = 'Covertype_Classifier_Training'
RUN_ID = 'Run_001'
SOURCE_TABLE = 'covertype_dataset.covertype'
DATASET_ID = 'splits'
EVALUATION_METRIC = 'accuracy'
EVALUATION_METRIC_THRESHOLD = '0.69'
MODEL_ID = 'covertype_classifier'
VERSION_ID = 'v01'
REPLACE_EXISTING_VERSION = 'True'
GCS_STAGING_PATH = '{}/staging'.format(ARTIFACT_STORE_URI)
!kfp --endpoint $ENDPOINT run submit \
-e $EXPERIMENT_NAME \
-r $RUN_ID \
-p $PIPELINE_ID \
project_id=$PROJECT_ID \
gcs_root=$GCS_STAGING_PATH \
region=$REGION \
source_table_name=$SOURCE_TABLE \
dataset_id=$DATASET_ID \
evaluation_metric_name=$EVALUATION_METRIC \
evaluation_metric_threshold=$EVALUATION_METRIC_THRESHOLD \
model_id=$MODEL_ID \
version_id=$VERSION_ID \
replace_existing_version=$REPLACE_EXISTING_VERSION
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Understanding the pipeline design
Step7: The pipeline uses a mix of custom and pre-build components.
Step8: The custom components execute in a container image defined in base_image/Dockerfile.
Step9: The training step in the pipeline employes the AI Platform Training component to schedule a AI Platform Training job in a custom training container. The custom training image is defined in trainer_image/Dockerfile.
Step10: Building and deploying the pipeline
Step11: Build the trainer image
Step12: Build the base image for custom components
Step13: Compile the pipeline
Step14: Use the CLI compiler to compile the pipeline
Step15: The result is the covertype_training_pipeline.yaml file.
Step16: Deploy the pipeline package
Step17: Submitting pipeline runs
Step18: Submit a run
|
10,350
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def print_sum(a, b):
print(a+b)
interact(print_sum, a=(-10,10,.1), b=(-8, 8, 2))
assert True # leave this for grading the print_sum exercise
def print_string(s, length=False):
if length==True:
print("%s has length %d" %(s, len(s)))
else:
print(s)
interact(print_string, s = "Hello World", length = False)
assert True # leave this for grading the print_string exercise
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interact basics
Step2: Use the interact function to interact with the print_sum function.
Step3: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
Step4: Use the interact function to interact with the print_string function.
|
10,351
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
A = np.array([[1,2],[3,4]])
A
b = np.array([3,17])
b
x = la.solve(A, b)
x
np.allclose(A @ x, b)
A1 = np.random.random((1000,1000))
b1 = np.random.random(1000)
%timeit la.solve(A1, b1)
%timeit la.inv(A1) @ b1
import scipy.linalg.lapack as lapack
lu, piv, x, info = lapack.dgesv(A, b)
x
C = np.array([[1, 2+3j], [3-2j, 4]])
C
C.conjugate()
def trace(M):
return np.diag(M).sum()
trace(C)
np.allclose(trace(C), la.eigvals(C).sum())
la.det(C)
np.linalg.matrix_rank(C)
la.norm(C, None) # Frobenius (default)
la.norm(C, 2) # largest sinular value
la.norm(C, -2) # smallest singular value
la.svdvals(C)
la.solve(A, b)
x, resid, rank, s = la.lstsq(A, b)
x
A1 = np.array([[1,2],[2,4]])
A1
b1 = np.array([3, 17])
b1
try:
la.solve(A1, b1)
except la.LinAlgError as e:
print(e)
x, resid, rank, s = la.lstsq(A1, b1)
x
A2 = np.random.random((10,3))
b2 = np.random.random(10)
try:
la.solve(A2, b2)
except ValueError as e:
print(e)
x, resid, rank, s = la.lstsq(A2, b2)
x
def least_squares(X, y):
return la.solve(X.T @ X, X.T @ y)
least_squares(A2, b2)
A = np.array([[1,0.6],[0.6,4]])
A
p, l, u = la.lu(A)
p
l
u
np.allclose(p@l@u, A)
U = la.cholesky(A)
U
np.allclose(U.T @ U, A)
# If workiing wiht complex matrices
np.allclose(U.T.conj() @ U, A)
Q, R = la.qr(A)
Q
np.allclose((la.norm(Q[:,0]), la.norm(Q[:,1])), (1,1))
np.allclose(Q@R, A)
u, v = la.eig(A)
u
v
np.allclose((la.norm(v[:,0]), la.norm(v[:,1])), (1,1))
np.allclose(v @ np.diag(u) @ v.T, A)
np.allclose(v @ np.diag(1/u) @ v.T, la.inv(A))
np.allclose(v @ np.diag(u**5) @ v.T, np.linalg.matrix_power(A, 5))
U, s, V = la.svd(A)
U
np.allclose((la.norm(U[:,0]), la.norm(U[:,1])), (1,1))
s
V
np.allclose((la.norm(V[:,0]), la.norm(V[:,1])), (1,1))
np.allclose(U @ np.diag(s) @ V, A)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Resources
Step2: Using solve is faster and more stable numerically than using matrix inversion
Step3: Under the hood (Optional)
Step4: Basic information about a matrix
Step5: Trace
Step6: Determinant
Step7: Rank
Step8: Norm
Step9: Least-squares solution
Step10: Normal equations
Step11: Matrix Decompositions
Step12: LU
Step13: Choleskey
Step14: QR
Step15: Spectral
Step16: Inverting A
Step17: Powers of A
Step18: SVD
|
10,352
|
<ASSISTANT_TASK:>
Python Code:
from sympy import isprime
[isprime(i) for i in [2, 3, 5, 7, 10, 11, 13, 17, 2017]]
from numpy.random import randint
%timeit sum([isprime(i) for i in randint(1e8, 1e9-1, 10**4)])
from datetime import datetime
today = datetime.today()
YEAR = today.year
print("On va travailler avec l'année", YEAR, "!")
def date_vers_nombre(date):
day = int("{:%d}".format(date))
month = int("{:%m}".format(date))
year = int("{:%Y}".format(date)[-2:])
return day, month, year
date = datetime(YEAR, 1, 12)
print(date_vers_nombre(date))
from itertools import permutations
from operator import mod, mul, add, pow, sub, floordiv
operations = [mod, mul, add, sub, floordiv]
def tous_les_resultats(nombres, ops=operations):
assert len(nombres) == 3
tous = []
for (x, y, z) in permutations(nombres):
# on a un ordre pour x, y, z
for f1 in ops:
for f2 in ops:
tous.append(f1(f2(x, y), z))
tous.append(f1(x, f2(y, z)))
# on enlève les doublons ici
return list(set(tous))
tous_les_resultats([1, 2, 3], [add])
tous_les_resultats([1, 2, 3], [add, mul])
noms_operations = {
mod: '%',
mul: '*',
add: '+',
sub: '-',
floordiv: '/',
}
def tous_les_resultats_2(nombres, ops=operations):
assert len(nombres) == 3
tous = {}
for (x, y, z) in permutations(nombres):
# on a un ordre pour x, y, z
for f1 in ops:
for f2 in ops:
n1 = f1(f2(x, y), z)
s1 = "{}({}({}, {}), {})".format(noms_operations[f1], noms_operations[f2], x, y, z)
tous[s1] = n1
n2 = f1(x, f2(y, z))
s2 = "{}({}, {}({}, {}))".format(noms_operations[f1], x, noms_operations[f2], y, z)
tous[s2] = n2
return tous
tous_les_resultats_2([1, 2, 3], [add])
tous_les_resultats_2([1, 2, 3], [add, mul])
def tous_les_resultats_3(nombres, ops=operations):
assert len(nombres) == 3
tous = {}
for (x, y, z) in permutations(nombres):
# on a un ordre pour x, y, z
for f1 in ops:
for f2 in ops:
n1 = f1(f2(x, y), z)
s1 = "{}({}({}, {}), {})".format(noms_operations[f1], noms_operations[f2], x, y, z)
tous[n1] = s1
n2 = f1(x, f2(y, z))
s2 = "{}({}, {}({}, {}))".format(noms_operations[f1], x, noms_operations[f2], y, z)
tous[n2] = s2
return tous
tous_les_resultats_3([1, 2, 3], [add])
tous_les_resultats_3([1, 2, 3], [add, mul])
def plus_grand_premier(nombres, ops=operations):
tous = tous_les_resultats_3(nombres, ops=ops)
premiers = [ p for p in tous.keys() if isprime(p) ]
plus_grand_premier = max(premiers)
expression = tous[plus_grand_premier]
return plus_grand_premier, expression
plus_grand_premier([1, 2, 3], [add, mul])
plus_grand_premier([1, 2, 3])
def tous_les_resultats_4(nombres, ops=operations):
assert len(nombres) == 3
tous = {}
for (x, y, z) in permutations(nombres):
# on a un ordre pour x, y, z
for f1 in ops:
for f2 in ops:
try:
n1 = f1(f2(x, y), z)
s1 = "{}({}({}, {}), {})".format(noms_operations[f1], noms_operations[f2], x, y, z)
tous[n1] = s1
except:
pass
try:
n2 = f1(x, f2(y, z))
s2 = "{}({}, {}({}, {}))".format(noms_operations[f1], x, noms_operations[f2], y, z)
tous[n2] = s2
except:
pass
return tous
def plus_grand_premier_2(nombres, ops=operations):
tous = tous_les_resultats_4(nombres, ops=ops)
premiers = [ p for p in tous.keys() if isprime(p) ]
plus_grand_premier = max(premiers)
expression = tous[plus_grand_premier]
return plus_grand_premier, expression
plus_grand_premier_2([1, 2, 3], [add, mul])
plus_grand_premier_2([1, 2, 3])
plus_grand_premier_2([12, 1, 93])
date
x, y, z = date_vers_nombre(date)
plus_grand_premier_2([x, y, z])
from datetime import timedelta
def tous_les_jours(year=YEAR):
date = datetime(year, 1, 1)
un_jour = timedelta(days=1)
for i in range(0, 366):
yield date
date += un_jour
if date.year > year: # On est allé trop loin
raise StopIteration
tous = []
for date in tous_les_jours():
x, y, z = date_vers_nombre(date)
p, expr = plus_grand_premier_2([x, y, z])
tous.append(([x, y, z], p, expr))
print("Pour la date {:%d-%m-%Y}, le plus grand nombre premier obtenu est {}, avec l'expression {}.".format(date, p, expr))
max(tous, key=lambda t: t[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Elle marche très bien, et est très rapide !
Step2: Pour des nombres de 8 chiffres (c'est tout petit), elle est vraiment rapide
Step3: $\implies$ $65 ~\text{ms}$ pour 10000 nombres à tester, ça me semble assez rapide pour ce qu'on veut en faire !
Step4: C'est ensuite facile de transformer une date en nombre, selon les deux formats.
Step5: Tester toutes les opérations possibles
Step6: On peut la vérifier sur de petites entrées
Step7: On voit que stocker juste l'entier résultat ne suffit pas, on aimerait garder trace de chaque façon de l'obtenir !
Step8: Si on stocke avec comme clés les expressions, on va en avoir BEAUCOUP.
Step9: Beaucoup plus raisonnable ! Ici, pour le 2ème exemple, le plus grand nombre premier obtenu est $7 = (3 \times 2) + 1$.
Step10: Il faut ignorer les erreurs de calculs et ne pas ajouter le nombre dans ce cas
Step11: Tester sur un jour
Step12: Tester tous les jours de l'année
|
10,353
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Some of these are hard to distinguish.
# Check https://quickdraw.withgoogle.com/data for examples
zoo = ['frog', 'horse', 'lion', 'monkey', 'octopus', 'owl', 'rhinoceros',
'snail', 'tiger', 'zebra']
# Mapping between category names and ids
animal2id = dict((c,i) for i,c in enumerate(zoo))
id2animal = dict((i,c) for i,c in enumerate(zoo))
for i, animal in id2animal.items():
print("Class {}: {}".format(i, animal))
from six.moves.urllib.request import urlretrieve
import os
DATA_DIR = 'data/'
def maybe_download(url, data_dir):
filename = url.split('/')[-1]
file_path = os.path.join(data_dir, filename)
# Check if the file already exists.
if not os.path.exists(file_path):
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print("Downloading {} to {}".format(url, file_path))
file_path, _ = urlretrieve(url=url, filename=file_path)
else:
print("Using previously downloaded file: {}".format(file_path))
return file_path
def load_data(file_path, max_examples=2000, example_name=''):
d = np.load(open(file_path, 'r'))
d = d[:max_examples,:] # limit number of instances to save memory
print("Loaded {} {} examples of dimension {} from {}".format(
d.shape[0], example_name, d.shape[1], file_path))
return d
data= []
labels =[]
for animal in zoo:
url = "https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/{}.npy".format(animal)
file_path = maybe_download(url, DATA_DIR)
data.append(load_data(file_path, max_examples = 1000, example_name = animal))
labels.extend([animal2id[animal]]*data[-1].shape[0])
data = np.concatenate(data, axis=0)
labels = np.array(labels)
print("Final shape of data: {}".format(data.shape))
import matplotlib.pyplot as plt
n_samples = 10
random_indices = np.random.permutation(data.shape[0])
for i in random_indices[:n_samples]:
print(i, labels[i])
print("Category {}: {}".format(labels[i], id2animal[labels[i]]))
# We'll show the image and its pixel value histogram side-by-side.
# To interpret the values as a 28x28 image, we need to reshape
# the numpy array, which is one dimensional.
image = data[i, :]
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(image.reshape(28, 28), cmap=plt.cm.Greys, interpolation='nearest')
ax2.hist(image, bins=20)
ax1.grid(False)
plt.show()
if data.dtype == 'uint8': # avoid doing this twice
data = data.astype(np.float32)
data = (data - (255 / 2.0)) / 255
random_indices = np.random.permutation(labels.shape[0])
print("Labels before:")
print(labels[random_indices[:5]])
def one_hot(labels, n_classes):
n_labels = len(labels)
one_hot_labels = np.zeros((n_labels, n_classes))
one_hot_labels[np.arange(n_labels), labels] = 1
return one_hot_labels
labels_one_hot = one_hot(labels, len(zoo))
print("Labels after:")
print(labels_one_hot[random_indices[:5]])
n_test_examples = 1000
random_indices = np.random.permutation(data.shape[0])
test_data = data[random_indices[:n_test_examples],:]
test_labels = labels_one_hot[random_indices[:n_test_examples],:]
train_data = data[random_indices[n_test_examples:],:]
train_labels = labels_one_hot[random_indices[n_test_examples:],:]
print("Data shapes: ", test_data.shape, test_labels.shape, train_data.shape, train_labels.shape)
outfile_name = os.path.join(DATA_DIR, "zoo.npz")
with open(outfile_name, 'w') as outfile:
np.savez(outfile, train_data, train_labels, test_data, test_labels)
print ("Saved train/test data to {}".format(outfile_name))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data is fun to look at. Compared to MNIST the classes seem much harder to distinguish
Step2: Our labels are 0,1,2,..,10 right now. We convert to a one-hot representation
Step3: Finally, let's split the data into random train and test partitions
Step4: Save data for other experiments
|
10,354
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import time
from scipy import stats
from scipy.optimize import minimize
stud_learning = pd.read_csv('student_learning_final.csv')
stud_learning.drop(['Unnamed: 0'], axis=1, inplace=True)
cluster_index = pd.read_csv("cluster_index.csv", header=None)
stud_learning['cluster_index'] = cluster_index[1]
stud_learning['frac_incorrect_atts'] = stud_learning['number of incorrect attempts'] / stud_learning['number of attempts']
stud_learning.head()
stud_learning.columns
stud_data = pd.read_hdf('stud_data.hdf','test')
stud_data.head()
stud_data = stud_data.join(stud_learning)
stud_data.head()
stud_data_sum = stud_data.groupby('cluster_index').agg(np.sum).copy()
stud_data_sum['frac_incorrect_atts'] = stud_data_sum['number of incorrect attempts'] / stud_data_sum['number of attempts']
stud_data_sum
arr1 = np.array(stud_data[stud_data['cluster_index'] == 1]['frac_incorrect_atts'])
arr2 = np.array(stud_data[stud_data['cluster_index'] == 2]['frac_incorrect_atts'])
arr1 = arr1[~np.isnan(arr1)]
arr2 = arr2[~np.isnan(arr2)]
stats.ttest_ind(arr1,arr2, equal_var = False)
arr_gam = np.array(stud_data[stud_data['cluster_index'] <= 2]['frac_incorrect_atts'])
arr_nongam = np.array(stud_data[stud_data['cluster_index'] > 2]['frac_incorrect_atts'])
arr_gam = arr_gam[~np.isnan(arr_gam)]
arr_nongam = arr_nongam[~np.isnan(arr_nongam)]
stats.ttest_ind(arr_gam,arr_nongam, equal_var = False)
arr4 = np.array(stud_data[stud_data['cluster_index'] == 4]['frac_incorrect_atts'])
arr5 = np.array(stud_data[stud_data['cluster_index'] == 5]['frac_incorrect_atts'])
arr6 = np.array(stud_data[stud_data['cluster_index'] == 6]['frac_incorrect_atts'])
arr4 = arr4[~np.isnan(arr4)]
arr5 = arr5[~np.isnan(arr5)]
arr6 = arr6[~np.isnan(arr6)]
print(stats.ttest_ind(arr5,arr4, equal_var = False))
print(stats.ttest_ind(arr6,arr5, equal_var = False))
stud_data_mean = stud_data.groupby('cluster_index').agg(np.mean).copy()
stud_data_mean
arr4 = np.array(stud_data[stud_data['cluster_index'] == 4]['frac_3s_atts'])
arr5 = np.array(stud_data[stud_data['cluster_index'] == 5]['frac_3s_atts'])
arr6 = np.array(stud_data[stud_data['cluster_index'] == 6]['frac_3s_atts'])
arr4 = arr4[~np.isnan(arr4)]
arr5 = arr5[~np.isnan(arr5)]
arr6 = arr6[~np.isnan(arr6)]
print(stats.ttest_ind(arr5,arr4, equal_var = False))
print(stats.ttest_ind(arr6,arr5, equal_var = False))
arr4 = np.array(stud_data[stud_data['cluster_index'] == 4]['frac_1s_hints'])
arr5 = np.array(stud_data[stud_data['cluster_index'] == 5]['frac_1s_hints'])
arr6 = np.array(stud_data[stud_data['cluster_index'] == 6]['frac_1s_hints'])
arr4 = arr4[~np.isnan(arr4)]
arr5 = arr5[~np.isnan(arr5)]
arr6 = arr6[~np.isnan(arr6)]
print(stats.ttest_ind(arr5,arr4, equal_var = False))
print(stats.ttest_ind(arr6,arr5, equal_var = False))
arr4 = np.array(stud_data[stud_data['cluster_index'] == 4]['max_atts'])
arr5 = np.array(stud_data[stud_data['cluster_index'] == 5]['max_atts'])
arr6 = np.array(stud_data[stud_data['cluster_index'] == 6]['max_atts'])
arr4 = arr4[~np.isnan(arr4)]
arr5 = arr5[~np.isnan(arr5)]
arr6 = arr6[~np.isnan(arr6)]
print(stats.ttest_ind(arr5,arr4, equal_var = False))
print(stats.ttest_ind(arr6,arr5, equal_var = False))
arr4 = np.array(stud_data[stud_data['cluster_index'] == 4]['max_probl_views'])
arr5 = np.array(stud_data[stud_data['cluster_index'] == 5]['max_probl_views'])
arr6 = np.array(stud_data[stud_data['cluster_index'] == 6]['max_probl_views'])
arr4 = arr4[~np.isnan(arr4)]
arr5 = arr5[~np.isnan(arr5)]
arr6 = arr6[~np.isnan(arr6)]
print(stats.ttest_ind(arr5,arr4, equal_var = False))
print(stats.ttest_ind(arr6,arr5, equal_var = False))
stud_data['learning_parameter'].describe()
stud_data[stud_data['learning_parameter'] < 0].shape[0]
stud_data[stud_data['learning_parameter'] < -0.5].describe()
stud_data[stud_data['learning_parameter'] >= 0.5].describe()
stud_data[stud_data['learning_parameter'] < -0.5].groupby('cluster_index').agg(len)['num_sess']
stud_data[stud_data['learning_parameter'] >= 0.5].groupby('cluster_index').agg(len)['num_sess']
stud_data.groupby('cluster_index').agg(np.mean)
stud_data.groupby('cluster_index').agg(np.std)['learning_parameter']
stud_data.groupby('cluster_index').agg(np.mean)[['frac_incorrect_atts', 'max_atts']].corr()
stud_data.corr()['frac_incorrect_atts']['max_atts']
stud_data['gaming_index'] = stud_data['cluster_index'].replace({1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6:1})
stud_data.groupby('gaming_index').agg(np.mean)
data = pd.read_hdf('data.hdf','test')
data.head()
stud_list = data['Anon Student Id'].unique()
#print(stud_list[:5])
stud_dict = {stud: cluster_index.loc[i, 1] for i, stud in enumerate(stud_list)}
stud_list[0]
cluster_index.loc[0, 1]
stud_dict['Stu_001d187b1b375fe98b88696b250177f0']
stud_df = pd.DataFrame()
for item in stud_dict:
#print(item, stud_dict[item])
stud_df.loc[item, 'cluster_index'] = int(stud_dict[item])
stud_df.head()
data_125 = data[(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 1].index)) | \
(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 2].index)) | \
(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 5].index))]
data_125.head()
data_2 = data[(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 2].index))]
s1 = data[data['Outcome'] <= 1].groupby(['x']).agg(len)['Problem Name']
s2 = data[data['Outcome'] == 1].groupby(['x']).agg(len)['Problem Name']
s1[8] = s1.loc[8:].sum()
for i in range(9, int(s1.index.max()+1)):
try:
s1.drop(i, inplace=True)
except ValueError:
pass
s2[8] = s2.loc[8:].sum()
for i in range(9, int(s2.index.max()+1)):
try:
s2.drop(i, inplace=True)
except ValueError:
pass
data_1 = data[(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 1].index))]
data_2 = data[(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 2].index))]
data_3 = data[(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 3].index))]
data_4 = data[(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 4].index))]
data_5 = data[(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 5].index))]
data_6 = data[(data['Anon Student Id'].isin(stud_df[stud_df['cluster_index'] == 6].index))]
s1_1 = data_1[data_1['Outcome'] <= 1].groupby(['x']).agg(len)['Problem Name']
s2_1 = data_1[data_1['Outcome'] == 1].groupby(['x']).agg(len)['Problem Name']
s1_1[8] = s1_1.loc[8:].sum()
for i in range(9, int(s1_1.index.max()+1)):
try:
s1_1.drop(i, inplace=True)
except ValueError:
pass
s2_1[8] = s2_1.loc[8:].sum()
for i in range(9, int(s2_1.index.max()+1)):
try:
s2_1.drop(i, inplace=True)
except ValueError:
pass
s1_2 = data_2[data_2['Outcome'] <= 1].groupby(['x']).agg(len)['Problem Name']
s2_2 = data_2[data_2['Outcome'] == 1].groupby(['x']).agg(len)['Problem Name']
s1_2[8] = s1_2.loc[8:].sum()
for i in range(9, int(s1_2.index.max()+1)):
try:
s1_2.drop(i, inplace=True)
except ValueError:
pass
s2_2[8] = s2_2.loc[8:].sum()
for i in range(9, int(s2_2.index.max()+1)):
try:
s2_2.drop(i, inplace=True)
except ValueError:
pass
s1_3 = data_3[data_3['Outcome'] <= 1].groupby(['x']).agg(len)['Problem Name']
s2_3 = data_3[data_3['Outcome'] == 1].groupby(['x']).agg(len)['Problem Name']
s1_3[8] = s1_3.loc[8:].sum()
for i in range(9, int(s1_3.index.max()+1)):
try:
s1_3.drop(i, inplace=True)
except ValueError:
pass
s2_3[8] = s2_3.loc[8:].sum()
for i in range(9, int(s2_3.index.max()+1)):
try:
s2_3.drop(i, inplace=True)
except ValueError:
pass
s1_4 = data_4[data_4['Outcome'] <= 1].groupby(['x']).agg(len)['Problem Name']
s2_4 = data_4[data_4['Outcome'] == 1].groupby(['x']).agg(len)['Problem Name']
s1_4[8] = s1_4.loc[8:].sum()
for i in range(9, int(s1_4.index.max()+1)):
try:
s1_4.drop(i, inplace=True)
except ValueError:
pass
s2_4[8] = s2_4.loc[8:].sum()
for i in range(9, int(s2_4.index.max()+1)):
try:
s2_4.drop(i, inplace=True)
except ValueError:
pass
s1_5 = data_5[data_5['Outcome'] <= 1].groupby(['x']).agg(len)['Problem Name']
s2_5 = data_5[data_5['Outcome'] == 1].groupby(['x']).agg(len)['Problem Name']
s1_5[8] = s1_5.loc[8:].sum()
for i in range(9, int(s1_5.index.max()+1)):
try:
s1_5.drop(i, inplace=True)
except ValueError:
pass
s2_5[8] = s2_5.loc[8:].sum()
for i in range(9, int(s2_5.index.max()+1)):
try:
s2_5.drop(i, inplace=True)
except ValueError:
pass
s1_6 = data_6[data_6['Outcome'] <= 1].groupby(['x']).agg(len)['Problem Name']
s2_6 = data_6[data_6['Outcome'] == 1].groupby(['x']).agg(len)['Problem Name']
s1_6[8] = s1_6.loc[8:].sum()
for i in range(9, int(s1_6.index.max()+1)):
try:
s1_6.drop(i, inplace=True)
except ValueError:
pass
s2_6[8] = s2_6.loc[8:].sum()
for i in range(9, int(s2_6.index.max()+1)):
try:
s2_6.drop(i, inplace=True)
except ValueError:
pass
fig, ax1 = plt.subplots()
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 8.3
fig_size[1] = 4.7
plt.rcParams["figure.figsize"] = fig_size
plt.xlim(0.5,8.5)
plt.bar(s1.index, s1, width=0.9)
#plt.bar(s2.index, s2, width=0.9)
#plt.legend(['CORRECT', 'INCORRECT'])
plt.xlabel("Attempt number", size=14)
plt.ylabel("Number of attempts", size=14)
ax1.tick_params(axis ='both', which='major', length=0, labelsize =14, color='black')
ax1.tick_params(axis ='both', which='minor', length=0)
labels = [item.get_text() for item in ax1.get_xticklabels()]
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8+']
#labels = ['2', '4', '6', '8+']
#print(labels)
ax2 = ax1.twinx()
ax2.plot(s1.index, s2/s1, 'r-o', linewidth=4, label='Average')
ax2.plot(s1_1.index, s2_1/s1_1, 'c-+', label='group 1')
ax2.plot(s1_2.index, s2_2/s1_2, 'b-+', label='group 2')
ax2.plot(s1_3.index, s2_3/s1_3, 'c-.', label='group 3')
ax2.plot(s1_4.index, s2_4/s1_4, 'b-.', label='group 4')
ax2.plot(s1_5.index, s2_5/s1_5, 'c-x', label='group 5')
ax2.plot(s1_6.index, s2_6/s1_6, 'b-x', label='group 6')
ax2.legend()
ax2.set_ylabel('Fraction of incorrect attempts', size=14, color='r')
ax2.tick_params('y', colors='r')
ax2.tick_params(axis ='both', which='minor', length=0)
ax2.tick_params(axis ='both', which='major', length=0, labelsize =14, color='red')
ax1.set_xticklabels(labels)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 3 (comparison of learning rates between clusters)
Step1: Determine what clusters more successful in learning in terms of fraction of correct attempts
Step2: Interestingly, group 3 has the smallest fraction of incorrect attempts (~36.5%). Also, not surprisingly, 'frac_incorrect_atts' in group 1 (with large 'frac_1s_hints') is significantly (p-value = 1.75e-8) smaller than in group 2 (with small 'frac_1s_hints' and large 'frac_3s_atts')
Step3: However, the difference of 'frac_incorrect_atts' between students with "gaming" and non-gaming behaviour is not significant (p-value = 0.83)
Step4: Notably, group 5 (students with medium 'num_sess' and 'num_probs') has significantly smaller 'frac_incorrect_atts' than group 4 (students with small 'num_sess' and 'num_probs') but significantly smaller 'frac_incorrect_atts' than in group 6 (students with large 'num_sess' and 'num_probs')
Step5: In other words, for students with non-gaming behaviour 'frac_incorrect_atts' steadily decreases with learning experience.
Step6: There is a significant increase of 'frac_3s_atts' for groups 4-5-6
Step7: , a significant increase of 'frac_1s_hints' for groups 4-5-6
Step8: , and a significant decrease of 'max_atts' for groups 4-5-6
Step9: Increase of max_probl_views is significant between groups 5 and 6
Step10: As we see, increasing "experience" (in group sequence 4-5-6) also leads to
Step11: Moreover, for ~1/3 of students learning parameter is negative
Step12: What best describes "negative learners"? First, look on extreme examples. Take "extreme negative learners" (students with learning rate < -0.5, and compare them with "extreme positive learnens" (students with learning rate > 0.5)
Step13: As we see, "extreme negative learners" are very similar to "extreme positive learners" in terms of 'frac_corr_atts'. However, they opened much more sessions ('num_sess'), tried to solve more problems ('num_probs'), made more attempts to solve the problems ('num_atts') and spent more time (time_atts) for solving them.
Step14: Notably, most of "extreme learners" belong to groups 3 and 4 that have the smallest 'num_atts'
Step15: Together with the smallest group 2, groups 3 and 4 also correspond to the largest 'learning_parameter' variation across their members
Step16: Note that both absolute average value and standard deviation come to zero with increasing 'num_atts'
Step17: As a result, students from groups 3 and 6 contribute less (and students from group 4 contribute more) to problems with large number of attempts and distort the averaged learning curve towards larger 'frac_incorrect_atts'.
Step18: Creating visualisation
|
10,355
|
<ASSISTANT_TASK:>
Python Code:
agencia_for_cliente_producto = train_dataset[['Cliente_ID','Producto_ID'
,'Agencia_ID']].groupby(['Cliente_ID',
'Producto_ID']).agg(lambda x:x.value_counts().index[0]).reset_index()
canal_for_cliente_producto = train_dataset[['Cliente_ID',
'Producto_ID','Canal_ID']].groupby(['Cliente_ID',
'Producto_ID']).agg(lambda x:x.value_counts().index[0]).reset_index()
ruta_for_cliente_producto = train_dataset[['Cliente_ID',
'Producto_ID','Ruta_SAK']].groupby(['Cliente_ID',
'Producto_ID']).agg(lambda x:x.value_counts().index[0]).reset_index()
gc.collect()
agencia_for_cliente_producto.to_pickle('agencia_for_cliente_producto.csv')
canal_for_cliente_producto.to_pickle('canal_for_cliente_producto.csv')
ruta_for_cliente_producto.to_pickle('ruta_for_cliente_producto.csv')
agencia_for_cliente_producto = pd.read_pickle('agencia_for_cliente_producto.csv')
canal_for_cliente_producto = pd.read_pickle('canal_for_cliente_producto.csv')
ruta_for_cliente_producto = pd.read_pickle('ruta_for_cliente_producto.csv')
# train_dataset['log_demand'] = train_dataset['Demanda_uni_equil'].apply(np.log1p)
pivot_train = pd.pivot_table(data= train_dataset[['Cliente_ID','Producto_ID','log_demand','Semana']],
values='log_demand', index=['Cliente_ID','Producto_ID'],
columns=['Semana'], aggfunc=np.mean,fill_value = 0).reset_index()
pivot_train.head()
pivot_train = pd.merge(left = pivot_train, right = agencia_for_cliente_producto, how = 'inner', on = ['Cliente_ID','Producto_ID'])
pivot_train = pd.merge(left = pivot_train, right = canal_for_cliente_producto, how = 'inner', on = ['Cliente_ID','Producto_ID'])
pivot_train = pd.merge(left = pivot_train, right = ruta_for_cliente_producto, how = 'inner', on = ['Cliente_ID','Producto_ID'])
pivot_train.to_pickle('pivot_train_with_zero.pickle')
pivot_train = pd.read_pickle('pivot_train_with_zero.pickle')
pivot_train.to_pickle('pivot_train_with_nan.pickle')
pivot_train = pd.read_pickle('pivot_train_with_nan.pickle')
pivot_train = pivot_train.rename(columns={3: 'Sem3', 4: 'Sem4',5: 'Sem5', 6: 'Sem6',7: 'Sem7', 8: 'Sem8',9: 'Sem9'})
pivot_train.head()
pivot_train.columns.values
test_dataset = pd.read_csv('origin/test.csv')
test_dataset.head()
test_dataset[test_dataset['Semana'] == 10].shape
test_dataset[test_dataset['Semana'] == 11].shape
pivot_test = pd.merge(left=pivot_train, right = test_dataset[['id','Cliente_ID','Producto_ID','Semana']],
on =['Cliente_ID','Producto_ID'],how = 'inner' )
pivot_test.head()
pivot_test_new = pd.merge(pivot_train[['Cliente_ID', 'Producto_ID', 'Sem3', 'Sem4', 'Sem5', 'Sem6', 'Sem7',
'Sem8', 'Sem9']],right = test_dataset, on = ['Cliente_ID','Producto_ID'],how = 'right')
pivot_test_new.head()
pivot_test_new.to_pickle('pivot_test.pickle')
pivot_test.to_pickle('pivot_test.pickle')
pivot_test = pd.read_pickle('pivot_test.pickle')
pivot_test.head()
train_dataset.head()
import itertools
col_list = ['Agencia_ID', 'Ruta_SAK', 'Cliente_ID', 'Producto_ID']
all_combine = itertools.combinations(col_list,2)
list_2element_combine = [list(tuple) for tuple in all_combine]
col_1elm_2elm = col_list + list_2element_combine
col_1elm_2elm
train_dataset_test = train_dataset[train_dataset['Semana'] < 8].copy()
def categorical_useful(train_dataset,pivot_train):
# if is_train:
# train_dataset_test = train_dataset[train_dataset['Semana'] < 8].copy()
# elif is_train == False:
train_dataset_test = train_dataset.copy()
log_demand_by_agen = train_dataset_test[['Agencia_ID','log_demand']].groupby('Agencia_ID').mean().reset_index()
log_demand_by_ruta = train_dataset_test[['Ruta_SAK','log_demand']].groupby('Ruta_SAK').mean().reset_index()
log_demand_by_cliente = train_dataset_test[['Cliente_ID','log_demand']].groupby('Cliente_ID').mean().reset_index()
log_demand_by_producto = train_dataset_test[['Producto_ID','log_demand']].groupby('Producto_ID').mean().reset_index()
log_demand_by_agen_ruta = train_dataset_test[['Agencia_ID', 'Ruta_SAK',
'log_demand']].groupby(['Agencia_ID', 'Ruta_SAK']).mean().reset_index()
log_demand_by_agen_cliente = train_dataset_test[['Agencia_ID', 'Cliente_ID',
'log_demand']].groupby(['Agencia_ID', 'Cliente_ID']).mean().reset_index()
log_demand_by_agen_producto = train_dataset_test[['Agencia_ID', 'Producto_ID',
'log_demand']].groupby(['Agencia_ID', 'Producto_ID']).mean().reset_index()
log_demand_by_ruta_cliente = train_dataset_test[['Ruta_SAK', 'Cliente_ID',
'log_demand']].groupby(['Ruta_SAK', 'Cliente_ID']).mean().reset_index()
log_demand_by_ruta_producto = train_dataset_test[['Ruta_SAK', 'Producto_ID',
'log_demand']].groupby(['Ruta_SAK', 'Producto_ID']).mean().reset_index()
log_demand_by_cliente_producto = train_dataset_test[['Cliente_ID', 'Producto_ID',
'log_demand']].groupby(['Cliente_ID', 'Producto_ID']).mean().reset_index()
log_demand_by_cliente_producto_agen = train_dataset_test[[
'Cliente_ID','Producto_ID','Agencia_ID','log_demand']].groupby(['Cliente_ID',
'Agencia_ID','Producto_ID']).mean().reset_index()
log_sum_by_cliente = train_dataset_test[['Cliente_ID','log_demand']].groupby('Cliente_ID').sum().reset_index()
ruta_freq_semana = train_dataset[['Semana','Ruta_SAK']].groupby(['Ruta_SAK']).count().reset_index()
clien_freq_semana = train_dataset[['Semana','Cliente_ID']].groupby(['Cliente_ID']).count().reset_index()
agen_freq_semana = train_dataset[['Semana','Agencia_ID']].groupby(['Agencia_ID']).count().reset_index()
prod_freq_semana = train_dataset[['Semana','Producto_ID']].groupby(['Producto_ID']).count().reset_index()
pivot_train = pd.merge(left = pivot_train,right = ruta_freq_semana,
how = 'left', on = ['Ruta_SAK']).rename(columns={'Semana': 'ruta_freq'})
pivot_train = pd.merge(left = pivot_train,right = clien_freq_semana,
how = 'left', on = ['Cliente_ID']).rename(columns={'Semana': 'clien_freq'})
pivot_train = pd.merge(left = pivot_train,right = agen_freq_semana,
how = 'left', on = ['Agencia_ID']).rename(columns={'Semana': 'agen_freq'})
pivot_train = pd.merge(left = pivot_train,right = prod_freq_semana,
how = 'left', on = ['Producto_ID']).rename(columns={'Semana': 'prod_freq'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_agen,
how = 'left', on = ['Agencia_ID']).rename(columns={'log_demand': 'agen_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_ruta,
how = 'left', on = ['Ruta_SAK']).rename(columns={'log_demand': 'ruta_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_cliente,
how = 'left', on = ['Cliente_ID']).rename(columns={'log_demand': 'cliente_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_producto,
how = 'left', on = ['Producto_ID']).rename(columns={'log_demand': 'producto_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_agen_ruta,
how = 'left', on = ['Agencia_ID', 'Ruta_SAK']).rename(columns={'log_demand': 'agen_ruta_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_agen_cliente,
how = 'left', on = ['Agencia_ID', 'Cliente_ID']).rename(columns={'log_demand': 'agen_cliente_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_agen_producto,
how = 'left', on = ['Agencia_ID', 'Producto_ID']).rename(columns={'log_demand': 'agen_producto_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_ruta_cliente,
how = 'left', on = ['Ruta_SAK', 'Cliente_ID']).rename(columns={'log_demand': 'ruta_cliente_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_ruta_producto,
how = 'left', on = ['Ruta_SAK', 'Producto_ID']).rename(columns={'log_demand': 'ruta_producto_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_cliente_producto,
how = 'left', on = ['Cliente_ID', 'Producto_ID']).rename(columns={'log_demand': 'cliente_producto_for_log_de'})
pivot_train = pd.merge(left = pivot_train,
right = log_sum_by_cliente,
how = 'left', on = ['Cliente_ID']).rename(columns={'log_demand': 'cliente_for_log_sum'})
pivot_train = pd.merge(left = pivot_train,
right = log_demand_by_cliente_producto_agen,
how = 'left', on = ['Cliente_ID', 'Producto_ID',
'Agencia_ID']).rename(columns={'log_demand': 'cliente_producto_agen_for_log_sum'})
pivot_train['corr'] = pivot_train['producto_for_log_de'] * pivot_train['cliente_for_log_de'] / train_dataset_test['log_demand'].median()
return pivot_train
def define_time_features(df, to_predict = 't_plus_1' , t_0 = 8):
    """Add lagged-week columns and their pairwise differences to df, in place.

    For to_predict='t_plus_1' the lag-1 column and its diffs are built;
    for 't_plus_2' the lag-6 column and its diffs are built instead.
    Lags 2-5 and their pairwise diffs are built in both cases.
    Returns the mutated df.
    """
    shared_lags = (2, 3, 4, 5)
    # Horizon-specific lag column first (matches original column order).
    if to_predict == 't_plus_1':
        df['t_min_1'] = df['Sem' + str(t_0 - 1)]
    if to_predict == 't_plus_2':
        df['t_min_6'] = df['Sem' + str(t_0 - 6)]
    # Lags shared by both horizons.
    for lag in shared_lags:
        df['t_min_%d' % lag] = df['Sem' + str(t_0 - lag)]
    # Horizon-specific differences.
    if to_predict == 't_plus_1':
        for lag in shared_lags:
            df['t1_min_t%d' % lag] = df['t_min_1'] - df['t_min_%d' % lag]
    if to_predict == 't_plus_2':
        for lag in shared_lags:
            df['t%d_min_t6' % lag] = df['t_min_%d' % lag] - df['t_min_6']
    # Pairwise differences between the shared lags.
    for near, far in ((2, 3), (2, 4), (2, 5), (3, 4), (3, 5), (4, 5)):
        df['t%d_min_t%d' % (near, far)] = df['t_min_%d' % near] - df['t_min_%d' % far]
    return df
def lin_regr(row, to_predict, t_0, semanas_numbers):
    """Fit an OLS line demand-vs-week on one row and extrapolate it.

    row: Series of weekly values; reindexed with semanas_numbers.
    to_predict: 't_plus_1' or 't_plus_2' (predict week t_0+1 or t_0+2).
    Returns the predicted value, or None when fewer than 3 non-null
    observations are available or to_predict is unknown.

    Bug fix: the original guard was `if(len(row>2))`, i.e. the length of a
    boolean mask (truthy whenever the row is non-empty); the intended check
    is `len(row) > 2`. Also replaced sklearn's LinearRegression with
    np.polyfit — same ordinary-least-squares line, no sklearn dependency.
    """
    row = row.copy()
    row.index = semanas_numbers
    row = row.dropna()
    if len(row) > 2:
        # Degree-1 least-squares fit: value = slope * week + intercept.
        slope, intercept = np.polyfit(row.index.values.astype(float), row.values, 1)
        if to_predict == 't_plus_1':
            return intercept + slope * (t_0 + 1)
        elif to_predict == 't_plus_2':
            return intercept + slope * (t_0 + 2)
        else:
            return None
    return None
def lin_regr_features(pivot_df, to_predict, semanas_numbers, t_0):
    """Attach per-product linear-trend features to a copy of pivot_df.

    Averages the weekly columns per Producto_ID, fits a line per product
    via lin_regr ('LR_prod'), merges it back, and derives 'LR_prod_corr'
    by scaling with the client's log-demand sum. Returns the new frame.
    """
    out = pivot_df.copy()
    week_cols = ['Sem%i' % wk for wk in semanas_numbers]
    # Per-product mean profile across the selected weeks.
    per_product = out[week_cols + ['Producto_ID']].groupby('Producto_ID').aggregate('mean')
    per_product['LR_prod'] = per_product[week_cols].apply(
        lin_regr, axis=1,
        to_predict=to_predict, t_0=t_0, semanas_numbers=semanas_numbers)
    out = pd.merge(out, per_product[['LR_prod']], how='left',
                   left_on='Producto_ID', right_index=True)
    out['LR_prod_corr'] = out['LR_prod'] * out['cliente_for_log_sum'] / 100
    return out
# Load client and town lookup tables; derive a town id from the Town name.
cliente_tabla = pd.read_csv('origin/cliente_tabla.csv')
town_state = pd.read_csv('origin/town_state.csv')
# NOTE(review): this first assignment is immediately overwritten below.
town_state['town_id'] = town_state['Town'].str.split()
# NOTE(review): str.split(expand=True) returns a multi-column DataFrame;
# assigning it to a single column relies on old-pandas behaviour (keeping
# the first token) — modern pandas raises here. Confirm pandas version.
town_state['town_id'] = town_state['Town'].str.split(expand = True)
def add_pro_info(dataset):
    """Left-join a town-level client count ('NombreCliente') onto dataset.

    Relies on the module-level cliente_tabla and town_state tables.
    """
    keys = dataset[['Cliente_ID', 'Producto_ID', 'Agencia_ID']].copy()
    keys.drop_duplicates(inplace=True)
    # Attach the client names, then the town each agency belongs to.
    with_town = pd.merge(keys, cliente_tabla, on='Cliente_ID', how='inner')
    with_town = pd.merge(with_town, town_state[['Agencia_ID', 'town_id']],
                         on='Agencia_ID', how='inner')
    # Row count of NombreCliente per town (clients-per-town feature).
    per_town = with_town[['NombreCliente', 'town_id']].groupby('town_id').count().reset_index()
    counts = pd.merge(with_town[['Cliente_ID', 'Producto_ID', 'town_id', 'Agencia_ID']],
                      per_town, on='town_id', how='inner')
    counts.drop_duplicates(inplace=True)
    return pd.merge(dataset,
                    counts[['Cliente_ID', 'Producto_ID', 'NombreCliente', 'Agencia_ID']],
                    on=['Cliente_ID', 'Producto_ID', 'Agencia_ID'], how='left')
# Load the preprocessed product table; coerce the numeric-ish columns,
# turning unparseable entries into NaN (errors='coerce').
pre_product = pd.read_csv('preprocessed_products.csv',index_col = 0)
pre_product['weight_per_piece'] = pd.to_numeric(pre_product['weight_per_piece'], errors='coerce')
pre_product['weight'] = pd.to_numeric(pre_product['weight'], errors='coerce')
pre_product['pieces'] = pd.to_numeric(pre_product['pieces'], errors='coerce')
def add_product(dataset):
    """Left-join weight/piece columns from the module-level pre_product table."""
    product_cols = pre_product[['ID', 'weight', 'weight_per_piece', 'pieces']]
    return pd.merge(dataset, product_cols,
                    left_on='Producto_ID', right_on='ID', how='left')
# Build the t+2 training frame: features from weeks 3-7, target = week 9.
train_34567 = train_dataset.loc[train_dataset['Semana'].isin([3,4,5,6,7]), :].copy()
train_pivot_34567_to_9 = pivot_train.loc[(pivot_train['Sem9'].notnull()),:].copy()
train_pivot_34567_to_9 = categorical_useful(train_34567,train_pivot_34567_to_9)
del train_34567
gc.collect()
train_pivot_34567_to_9 = define_time_features(train_pivot_34567_to_9, to_predict = 't_plus_2' , t_0 = 9)
train_pivot_34567_to_9 = lin_regr_features(train_pivot_34567_to_9,to_predict ='t_plus_2',
                                           semanas_numbers = [3,4,5,6,7],t_0 = 9)
train_pivot_34567_to_9['target'] = train_pivot_34567_to_9['Sem9']
train_pivot_34567_to_9.drop(['Sem8','Sem9'],axis =1,inplace = True)
#add cum_sum
# Replace the raw weekly columns with their running sums (t_m_*_cum).
train_pivot_cum_sum = train_pivot_34567_to_9[['Sem3','Sem4','Sem5','Sem6','Sem7']].cumsum(axis = 1)
train_pivot_34567_to_9.drop(['Sem3','Sem4','Sem5','Sem6','Sem7'],axis =1,inplace = True)
train_pivot_34567_to_9 = pd.concat([train_pivot_34567_to_9,train_pivot_cum_sum],axis =1)
train_pivot_34567_to_9 = train_pivot_34567_to_9.rename(columns={'Sem3': 't_m_6_cum',
                                       'Sem4': 't_m_5_cum','Sem5': 't_m_4_cum',
                                       'Sem6': 't_m_3_cum','Sem7': 't_m_2_cum'})
# add geo_info
train_pivot_34567_to_9 = add_pro_info(train_pivot_34567_to_9)
#add product info
train_pivot_34567_to_9 = add_product(train_pivot_34567_to_9)
train_pivot_34567_to_9.drop(['ID'],axis = 1,inplace = True)
gc.collect()
train_pivot_34567_to_9.head()
# Build the t+1 training frame: features from weeks 4-8, target = week 9.
train_45678 = train_dataset.loc[train_dataset['Semana'].isin([4,5,6,7,8]), :].copy()
train_pivot_45678_to_9 = pivot_train.loc[(pivot_train['Sem9'].notnull()),:].copy()
train_pivot_45678_to_9 = categorical_useful(train_45678,train_pivot_45678_to_9)
del train_45678
gc.collect()
train_pivot_45678_to_9 = define_time_features(train_pivot_45678_to_9, to_predict = 't_plus_1' , t_0 = 9)
train_pivot_45678_to_9 = lin_regr_features(train_pivot_45678_to_9,to_predict ='t_plus_1',
                                           semanas_numbers = [4,5,6,7,8],t_0 = 8)
train_pivot_45678_to_9['target'] = train_pivot_45678_to_9['Sem9']
train_pivot_45678_to_9.drop(['Sem3','Sem9'],axis =1,inplace = True)
#add cum_sum
train_pivot_cum_sum = train_pivot_45678_to_9[['Sem4','Sem5','Sem6','Sem7','Sem8']].cumsum(axis = 1)
train_pivot_45678_to_9.drop(['Sem4','Sem5','Sem6','Sem7','Sem8'],axis =1,inplace = True)
train_pivot_45678_to_9 = pd.concat([train_pivot_45678_to_9,train_pivot_cum_sum],axis =1)
train_pivot_45678_to_9 = train_pivot_45678_to_9.rename(columns={'Sem4': 't_m_5_cum',
                    'Sem5': 't_m_4_cum','Sem6': 't_m_3_cum', 'Sem7': 't_m_2_cum','Sem8': 't_m_1_cum'})
# add geo_info
train_pivot_45678_to_9 = add_pro_info(train_pivot_45678_to_9)
#add product info
train_pivot_45678_to_9 = add_product(train_pivot_45678_to_9)
train_pivot_45678_to_9.drop(['ID'],axis = 1,inplace = True)
gc.collect()
train_pivot_45678_to_9.head()
train_pivot_45678_to_9.columns.values
# Round-trip through CSV (drops dtypes/index; twice — presumably checkpointing).
train_pivot_45678_to_9.to_csv('train_pivot_45678_to_9.csv')
train_pivot_45678_to_9 = pd.read_csv('train_pivot_45678_to_9.csv',index_col = 0)
train_pivot_45678_to_9.to_csv('train_pivot_45678_to_9_new.csv')
train_pivot_45678_to_9 = pd.read_csv('train_pivot_45678_to_9_new.csv',index_col = 0)
# Build the t+1 training frame: features from weeks 3-7, target = week 8,
# then stack it with the 45678->9 frame into the time1 training set.
train_34567 = train_dataset.loc[train_dataset['Semana'].isin([3,4,5,6,7]), :].copy()
train_pivot_34567_to_8 = pivot_train.loc[(pivot_train['Sem8'].notnull()),:].copy()
train_pivot_34567_to_8 = categorical_useful(train_34567,train_pivot_34567_to_8)
del train_34567
gc.collect()
train_pivot_34567_to_8 = define_time_features(train_pivot_34567_to_8, to_predict = 't_plus_1' , t_0 = 8)
train_pivot_34567_to_8 = lin_regr_features(train_pivot_34567_to_8,to_predict = 't_plus_1',
                                           semanas_numbers = [3,4,5,6,7],t_0 = 7)
train_pivot_34567_to_8['target'] = train_pivot_34567_to_8['Sem8']
train_pivot_34567_to_8.drop(['Sem8','Sem9'],axis =1,inplace = True)
#add cum_sum
train_pivot_cum_sum = train_pivot_34567_to_8[['Sem3','Sem4','Sem5','Sem6','Sem7']].cumsum(axis = 1)
train_pivot_34567_to_8.drop(['Sem3','Sem4','Sem5','Sem6','Sem7'],axis =1,inplace = True)
train_pivot_34567_to_8 = pd.concat([train_pivot_34567_to_8,train_pivot_cum_sum],axis =1)
train_pivot_34567_to_8 = train_pivot_34567_to_8.rename(columns={'Sem3': 't_m_5_cum','Sem4': 't_m_4_cum',
                                                                'Sem5': 't_m_3_cum','Sem6': 't_m_2_cum',
                                                                'Sem7': 't_m_1_cum'})
# add product_info
train_pivot_34567_to_8 = add_pro_info(train_pivot_34567_to_8)
#add product
train_pivot_34567_to_8 = add_product(train_pivot_34567_to_8)
train_pivot_34567_to_8.drop(['ID'],axis = 1,inplace = True)
gc.collect()
train_pivot_34567_to_8.head()
train_pivot_34567_to_8.columns.values
train_pivot_34567_to_8.to_csv('train_pivot_34567_to_8.csv')
train_pivot_34567_to_8 = pd.read_csv('train_pivot_34567_to_8.csv',index_col = 0)
gc.collect()
# Stack both t+1 frames row-wise into the time1 training set.
train_pivot_xgb_time1 = pd.concat([train_pivot_45678_to_9, train_pivot_34567_to_8],axis = 0,copy = False)
train_pivot_xgb_time1.columns.values
train_pivot_xgb_time1.shape
train_pivot_xgb_time1.to_csv('train_pivot_xgb_time1_44fea.csv')
train_pivot_xgb_time1.to_csv('train_pivot_xgb_time1.csv')
del train_pivot_xgb_time1
del train_pivot_45678_to_9
del train_pivot_34567_to_8
gc.collect()
# Test-set features for predicting week 10 (t+1) from weeks 5-9.
pivot_test.head()
pivot_test_week10 = pivot_test.loc[pivot_test['sem10_sem11'] == 10]
pivot_test_week10.reset_index(drop=True,inplace = True)
pivot_test_week10.head()
pivot_test_week10.shape
train_56789 = train_dataset.loc[train_dataset['Semana'].isin([5,6,7,8,9]), :].copy()
train_pivot_56789_to_10 = pivot_test_week10.copy()
train_pivot_56789_to_10 = categorical_useful(train_56789,train_pivot_56789_to_10)
del train_56789
gc.collect()
train_pivot_56789_to_10 = define_time_features(train_pivot_56789_to_10, to_predict = 't_plus_1' , t_0 = 10)
train_pivot_56789_to_10 = lin_regr_features(train_pivot_56789_to_10,to_predict ='t_plus_1' ,
                                            semanas_numbers = [5,6,7,8,9],t_0 = 9)
train_pivot_56789_to_10.drop(['Sem3','Sem4'],axis =1,inplace = True)
#add cum_sum
train_pivot_cum_sum = train_pivot_56789_to_10[['Sem5','Sem6','Sem7','Sem8','Sem9']].cumsum(axis = 1)
train_pivot_56789_to_10.drop(['Sem5','Sem6','Sem7','Sem8','Sem9'],axis =1,inplace = True)
train_pivot_56789_to_10 = pd.concat([train_pivot_56789_to_10,train_pivot_cum_sum],axis =1)
train_pivot_56789_to_10 = train_pivot_56789_to_10.rename(columns={'Sem5': 't_m_5_cum',
                                       'Sem6': 't_m_4_cum','Sem7': 't_m_3_cum',
                                       'Sem8': 't_m_2_cum','Sem9': 't_m_1_cum'})
# add product_info
train_pivot_56789_to_10 = add_pro_info(train_pivot_56789_to_10)
#
train_pivot_56789_to_10 = add_product(train_pivot_56789_to_10)
train_pivot_56789_to_10.drop(['ID'],axis =1,inplace = True)
train_pivot_56789_to_10.head()
train_pivot_56789_to_10.columns.values
train_pivot_56789_to_10.to_pickle('train_pivot_56789_to_10_44fea.pickle')
# Build the t+2 training frame: features from weeks 3-6, target = week 8.
train_3456 = train_dataset.loc[train_dataset['Semana'].isin([3,4,5,6]), :].copy()
train_pivot_3456_to_8 = pivot_train.loc[(pivot_train['Sem8'].notnull()),:].copy()
train_pivot_3456_to_8 = categorical_useful(train_3456,train_pivot_3456_to_8)
del train_3456
gc.collect()
train_pivot_3456_to_8 = define_time_features(train_pivot_3456_to_8, to_predict = 't_plus_2' , t_0 = 8)
#notice that the t_0 means different
train_pivot_3456_to_8 = lin_regr_features(train_pivot_3456_to_8,to_predict = 't_plus_2', semanas_numbers = [3,4,5,6],t_0 = 6)
train_pivot_3456_to_8['target'] = train_pivot_3456_to_8['Sem8']
train_pivot_3456_to_8.drop(['Sem7','Sem8','Sem9'],axis =1,inplace = True)
#add cum_sum
train_pivot_cum_sum = train_pivot_3456_to_8[['Sem3','Sem4','Sem5','Sem6']].cumsum(axis = 1)
train_pivot_3456_to_8.drop(['Sem3','Sem4','Sem5','Sem6'],axis =1,inplace = True)
train_pivot_3456_to_8 = pd.concat([train_pivot_3456_to_8,train_pivot_cum_sum],axis =1)
train_pivot_3456_to_8 = train_pivot_3456_to_8.rename(columns={'Sem4': 't_m_4_cum',
                      'Sem5': 't_m_3_cum','Sem6': 't_m_2_cum', 'Sem3': 't_m_5_cum'})
# add product_info
train_pivot_3456_to_8 = add_pro_info(train_pivot_3456_to_8)
train_pivot_3456_to_8 = add_product(train_pivot_3456_to_8)
train_pivot_3456_to_8.drop(['ID'],axis =1,inplace = True)
train_pivot_3456_to_8.head()
train_pivot_3456_to_8.columns.values
train_pivot_3456_to_8.to_csv('train_pivot_3456_to_8.csv')
# Build the second t+2 training frame (weeks 4-7 -> week 9) and stack the
# two t+2 frames into the time2 training set.
train_4567 = train_dataset.loc[train_dataset['Semana'].isin([4,5,6,7]), :].copy()
train_pivot_4567_to_9 = pivot_train.loc[(pivot_train['Sem9'].notnull()),:].copy()
train_pivot_4567_to_9 = categorical_useful(train_4567,train_pivot_4567_to_9)
del train_4567
gc.collect()
train_pivot_4567_to_9 = define_time_features(train_pivot_4567_to_9, to_predict = 't_plus_2' , t_0 = 9)
#notice that the t_0 means different
train_pivot_4567_to_9 = lin_regr_features(train_pivot_4567_to_9,to_predict = 't_plus_2',
                                          semanas_numbers = [4,5,6,7],t_0 = 7)
train_pivot_4567_to_9['target'] = train_pivot_4567_to_9['Sem9']
train_pivot_4567_to_9.drop(['Sem3','Sem8','Sem9'],axis =1,inplace = True)
#add cum_sum
# NOTE(review): the column order ['Sem7','Sem4','Sem5','Sem6'] differs from
# the chronological order used by the other cells; cumsum runs in listed
# order, so these t_m_*_cum features are inconsistent with the 3456->8
# cell. Confirm whether ['Sem4','Sem5','Sem6','Sem7'] was intended.
train_pivot_cum_sum = train_pivot_4567_to_9[['Sem7','Sem4','Sem5','Sem6']].cumsum(axis = 1)
train_pivot_4567_to_9.drop(['Sem7','Sem4','Sem5','Sem6'],axis =1,inplace = True)
train_pivot_4567_to_9 = pd.concat([train_pivot_4567_to_9,train_pivot_cum_sum],axis =1)
train_pivot_4567_to_9 = train_pivot_4567_to_9.rename(columns={'Sem4': 't_m_5_cum',
                      'Sem5': 't_m_4_cum','Sem6': 't_m_3_cum', 'Sem7': 't_m_2_cum'})
# add product_info
train_pivot_4567_to_9 = add_pro_info(train_pivot_4567_to_9)
train_pivot_4567_to_9 = add_product(train_pivot_4567_to_9)
train_pivot_4567_to_9.drop(['ID'],axis =1,inplace = True)
train_pivot_4567_to_9.head()
train_pivot_4567_to_9.columns.values
train_pivot_4567_to_9.to_csv('train_pivot_4567_to_9.csv')
train_pivot_xgb_time2 = pd.concat([train_pivot_3456_to_8, train_pivot_4567_to_9],axis = 0,copy = False)
train_pivot_xgb_time2.columns.values
train_pivot_xgb_time2.shape
train_pivot_xgb_time2.to_csv('train_pivot_xgb_time2_38fea.csv')
# NOTE(review): reads 'train_pivot_xgb_time2.csv', which this notebook never
# writes (only the *_38fea variant above) — confirm the intended file name.
train_pivot_xgb_time2 = pd.read_csv('train_pivot_xgb_time2.csv',index_col = 0)
train_pivot_xgb_time2.head()
del train_pivot_3456_to_8
del train_pivot_4567_to_9
del train_pivot_xgb_time2
del train_pivot_34567_to_8
del train_pivot_45678_to_9
del train_pivot_xgb_time1
gc.collect()
# Test-set features for predicting week 11 (t+2) from weeks 6-9.
pivot_test_week11 = pivot_test_new.loc[pivot_test_new['Semana'] == 11]
pivot_test_week11.reset_index(drop=True,inplace = True)
pivot_test_week11.head()
pivot_test_week11.shape
train_6789 = train_dataset.loc[train_dataset['Semana'].isin([6,7,8,9]), :].copy()
train_pivot_6789_to_11 = pivot_test_week11.copy()
train_pivot_6789_to_11 = categorical_useful(train_6789,train_pivot_6789_to_11)
del train_6789
gc.collect()
train_pivot_6789_to_11 = define_time_features(train_pivot_6789_to_11, to_predict = 't_plus_2' , t_0 = 11)
train_pivot_6789_to_11 = lin_regr_features(train_pivot_6789_to_11,to_predict ='t_plus_2' ,
                                           semanas_numbers = [6,7,8,9],t_0 = 9)
train_pivot_6789_to_11.drop(['Sem3','Sem4','Sem5'],axis =1,inplace = True)
#add cum_sum
train_pivot_cum_sum = train_pivot_6789_to_11[['Sem6','Sem7','Sem8','Sem9']].cumsum(axis = 1)
train_pivot_6789_to_11.drop(['Sem6','Sem7','Sem8','Sem9'],axis =1,inplace = True)
train_pivot_6789_to_11 = pd.concat([train_pivot_6789_to_11,train_pivot_cum_sum],axis =1)
train_pivot_6789_to_11 = train_pivot_6789_to_11.rename(columns={'Sem6': 't_m_5_cum',
                      'Sem7': 't_m_4_cum', 'Sem8': 't_m_3_cum','Sem9': 't_m_2_cum'})
# add product_info
train_pivot_6789_to_11 = add_pro_info(train_pivot_6789_to_11)
train_pivot_6789_to_11 = add_product(train_pivot_6789_to_11)
train_pivot_6789_to_11.drop(['ID'],axis = 1,inplace = True)
train_pivot_6789_to_11.head()
train_pivot_6789_to_11.shape
train_pivot_6789_to_11.to_pickle('train_pivot_6789_to_11_new.pickle')
# Exploratory feature-engineering cells (IPython magics included verbatim).
% time pivot_train_categorical_useful = categorical_useful(train_dataset,pivot_train,is_train = True)
% time pivot_train_categorical_useful = categorical_useful(train_dataset,pivot_train,is_train = True)
# NOTE(review): pivot_train_categorical_useful_train is used before any
# visible assignment in this notebook — confirm it is defined in an
# earlier (not shown) cell.
pivot_train_categorical_useful_train.to_csv('pivot_train_categorical_useful_with_nan.csv')
pivot_train_categorical_useful_train = pd.read_csv('pivot_train_categorical_useful_with_nan.csv',index_col = 0)
pivot_train_categorical_useful_train.head()
pivot_train_categorical_useful.head()
pivot_train_categorical_useful_time = define_time_features(pivot_train_categorical_useful,
                                                           to_predict = 't_plus_1' , t_0 = 8)
pivot_train_categorical_useful_time.head()
pivot_train_categorical_useful_time.columns
# Linear regression features
# NOTE(review): lin_regr_features requires to_predict and t_0; this call
# passes neither and raises TypeError as written.
pivot_train_categorical_useful_time_LR = lin_regr_features(pivot_train_categorical_useful_time, semanas_numbers = [3,4,5,6,7])
pivot_train_categorical_useful_time_LR.head()
pivot_train_categorical_useful_time_LR.columns
pivot_train_categorical_useful_time_LR.to_csv('pivot_train_categorical_useful_time_LR.csv')
pivot_train_categorical_useful_time_LR = pd.read_csv('pivot_train_categorical_useful_time_LR.csv',index_col = 0)
pivot_train_categorical_useful_time_LR.head()
# pivot_train_canal = pd.get_dummies(pivot_train_categorical_useful_train['Canal_ID'])
# pivot_train_categorical_useful_train = pivot_train_categorical_useful_train.join(pivot_train_canal)
# pivot_train_categorical_useful_train.head()
%ls
pre_product = pd.read_csv('preprocessed_products.csv',index_col = 0)
pre_product.head()
pre_product['weight_per_piece'] = pd.to_numeric(pre_product['weight_per_piece'], errors='coerce')
pre_product['weight'] = pd.to_numeric(pre_product['weight'], errors='coerce')
pre_product['pieces'] = pd.to_numeric(pre_product['pieces'], errors='coerce')
# Attach product weight columns (the merge is repeated twice verbatim).
pivot_train_categorical_useful_time_LR_weight = pd.merge(pivot_train_categorical_useful_time_LR,
                                                         pre_product[['ID','weight','weight_per_piece']],
                                                         left_on = 'Producto_ID',right_on = 'ID',how = 'left')
pivot_train_categorical_useful_time_LR_weight.head()
pivot_train_categorical_useful_time_LR_weight = pd.merge(pivot_train_categorical_useful_time_LR,
                                                         pre_product[['ID','weight','weight_per_piece']],
                                                         left_on = 'Producto_ID',right_on = 'ID',how = 'left')
pivot_train_categorical_useful_time_LR_weight.head()
pivot_train_categorical_useful_time_LR_weight.to_csv('pivot_train_categorical_useful_time_LR_weight.csv')
pivot_train_categorical_useful_time_LR_weight = pd.read_csv('pivot_train_categorical_useful_time_LR_weight.csv',index_col = 0)
pivot_train_categorical_useful_time_LR_weight.head()
# Town-level client-count feature (exploratory duplicate of add_pro_info)
# followed by an xgboost cross-validation run.
%cd '/media/siyuan/0009E198000CD19B/bimbo/origin'
%ls
cliente_tabla = pd.read_csv('cliente_tabla.csv')
town_state = pd.read_csv('town_state.csv')
town_state['town_id'] = town_state['Town'].str.split()
town_state['town_id'] = town_state['Town'].str.split(expand = True)
train_basic_feature = pivot_train_categorical_useful_time_LR_weight[['Cliente_ID','Producto_ID','Agencia_ID']]
cliente_per_town = pd.merge(train_basic_feature,cliente_tabla,on = 'Cliente_ID',how= 'inner' )
cliente_per_town = pd.merge(cliente_per_town,town_state[['Agencia_ID','town_id']],on = 'Agencia_ID',how= 'inner' )
cliente_per_town_count = cliente_per_town[['NombreCliente','town_id']].groupby('town_id').count().reset_index()
# Scale the raw count down (count / 1e5) before merging it back.
cliente_per_town_count['NombreCliente'] = cliente_per_town_count['NombreCliente']/float(100000)
cliente_per_town_count_final = pd.merge(cliente_per_town[['Cliente_ID','Producto_ID','Agencia_ID','town_id']],
                                        cliente_per_town_count,on = 'town_id',how = 'left')
pivot_train_categorical_useful_time_LR_weight_town = pd.merge(pivot_train_categorical_useful_time_LR_weight,
                                                              cliente_per_town_count_final[['Cliente_ID','Producto_ID','NombreCliente']],
                                                              on = ['Cliente_ID','Producto_ID'],how = 'left')
cliente_tabla.head()
town_state.head()
town_state['town_id'] = town_state['Town'].str.split()
town_state['town_id'] = town_state['Town'].str.split(expand = True)
town_state.head()
pivot_train_categorical_useful_time_LR_weight.columns.values
train_basic_feature = pivot_train_categorical_useful_time_LR_weight[['Cliente_ID','Producto_ID','Agencia_ID']]
cliente_per_town = pd.merge(train_basic_feature,cliente_tabla,on = 'Cliente_ID',how= 'inner' )
cliente_per_town = pd.merge(cliente_per_town,town_state[['Agencia_ID','town_id']],on = 'Agencia_ID',how= 'inner' )
cliente_per_town.head()
cliente_per_town_count = cliente_per_town[['NombreCliente','town_id']].groupby('town_id').count().reset_index()
cliente_per_town_count['NombreCliente'] = cliente_per_town_count['NombreCliente']/float(100000)
cliente_per_town_count.head()
cliente_per_town_count_final = pd.merge(cliente_per_town[['Cliente_ID','Producto_ID','Agencia_ID','town_id']],
                                        cliente_per_town_count,on = 'town_id',how = 'left')
cliente_per_town_count_final.head()
pivot_train_categorical_useful_time_LR_weight_town = pd.merge(pivot_train_categorical_useful_time_LR_weight,
                                                              cliente_per_town_count_final[['Cliente_ID','Producto_ID','NombreCliente']],
                                                              on = ['Cliente_ID','Producto_ID'],how = 'left')
pivot_train_categorical_useful_time_LR_weight_town.head()
pivot_train_categorical_useful_time_LR_weight_town.columns.values
train_pivot_xgb_time1.columns.values
train_pivot_xgb_time1 = train_pivot_xgb_time1.drop(['Cliente_ID','Producto_ID','Agencia_ID',
                                                    'Ruta_SAK','Canal_ID'],axis = 1)
pivot_train_categorical_useful_train_time_no_nan = pivot_train_categorical_useful_train[pivot_train_categorical_useful_train['Sem8'].notnull()]
# pivot_train_categorical_useful_train_time_no_nan = pivot_train_categorical_useful_train[pivot_train_categorical_useful_train['Sem9'].notnull()]
pivot_train_categorical_useful_train_time_no_nan_sample = pivot_train_categorical_useful_train_time_no_nan.sample(1000000)
train_feature = pivot_train_categorical_useful_train_time_no_nan_sample.drop(['Sem8','Sem9'],axis = 1)
train_label = pivot_train_categorical_useful_train_time_no_nan_sample[['Sem8','Sem9']]
#seperate train and test data
# datasource: sparse_week_Agencia_Canal_Ruta_normalized_csr label:train_label
%time train_set, valid_set, train_labels, valid_labels = train_test_split(train_feature,\
                                                                          train_label, test_size=0.10)
# dtrain = xgb.DMatrix(train_feature,label = train_label['Sem8'],missing=NaN)
# NOTE(review): the bare name NaN is undefined unless a star-import
# (e.g. `from numpy import *`) ran earlier — np.nan would be explicit.
dtrain = xgb.DMatrix(train_feature,label = train_label['Sem8'],missing=NaN)
# Gradient-boosted-tree hyperparameters for xgb.cv (RMSE objective).
param = {'booster':'gbtree',
         'nthread': 7,
         'max_depth':6,
         'eta':0.2,
         'silent':0,
         'subsample':0.7,
         'objective':'reg:linear',
         'eval_metric':'rmse',
         'colsample_bytree':0.7}
# param = {'eta':0.1, 'eval_metric':'rmse','nthread': 8}
# evallist = [(dvalid,'eval'), (dtrain,'train')]
num_round = 1000
# plst = param.items()
# bst = xgb.train( plst, dtrain, num_round, evallist )
# 5-fold CV with early stopping after 10 rounds without improvement.
cvresult = xgb.cv(param, dtrain, num_round, nfold=5,show_progress=True,show_stdv=False,
                  seed = 0, early_stopping_rounds=10)
print(cvresult.tail())
# xgb.plot_importance(cvresult)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: make pivot table of test
Step2: groupby use Agencia_ID, Ruta_SAK, Cliente_ID, Producto_ID
Step3: if predict week 8, use data from 3,4,5,6,7
Step4: data for predict week [34567----9], time plus 2 week
Step5: data for predict week 8&9, time plus 1 week
Step6: train_34567 7+1 = 8
Step7: concat train_pivot_45678_to_9 & train_pivot_34567_to_8 to perform t_plus_1, train_data is over
Step8: prepare for test data, for week 10, we use 5,6,7,8,9
Step9: begin predict for week 11
Step10: train_4567 for 7 + 2 = 9
Step11: concat
Step12: for test data week 11, we use 6,7,8,9
Step13: over
Step14: create time feature
Step15: fit mean feature on target
Step16: add dummy feature
Step17: add product feature
Step18: add town feature
Step19: begin xgboost training
Step20: for 1 week later
|
10,356
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from ipywidgets import interact, fixed
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
# Joint Gaussian (Z1, Z2): mean vector and covariance matrix.
barZ = np.array([[1],[3]])
QZ = np.array([[3,1],[1,1]])
a = barZ[0]       # marginal mean of Z1
b = QZ[0,0]       # marginal VARIANCE of Z1 (not a std-dev)
xx = np.linspace(-6, 10, 100)
# Conditional (posterior) variance of Z1 given Z2.
R = QZ[0,0]-QZ[0,1]*QZ[0,1]/QZ[1,1]
def pltbayesgauss(obs):
    """Plot the Gaussian prior on Z1 and its posterior given Z2 = obs.

    Uses the module-level prior (a, b), conditional variance R, grid xx,
    and joint parameters barZ/QZ.

    Bug fix: scipy.stats.norm.pdf takes a *standard deviation* as its
    scale argument; the original passed the variances b and R directly,
    so both curves were drawn with the wrong spread.
    """
    # Conditional (posterior) mean of Z1 given Z2 = obs.
    hatX = barZ[0]+QZ[0,1]*(obs-barZ[1])/QZ[1,1]
    plt.plot([obs,obs],[0,1],':')
    plt.plot(xx, stats.norm.pdf(xx, a, np.sqrt(b)),label='loi a priori')
    plt.plot(xx, stats.norm.pdf(xx, hatX, np.sqrt(R)),label='loi a posteriori')
    plt.ylim([0,0.25])
    plt.legend()
    plt.show()
interact(pltbayesgauss, obs=(-6,10,0.1))
plt.show()
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Discrete-time linear system parameters: horizon, initial mean/variance,
# dynamics X[k+1] = A X[k] + B W[k] with Var(W) = QW.
kmax = 300
EX0, VX0 = 10, 5
A, B, QW = 0.9, 1, 0.1
sQW = np.sqrt(QW)
sVX0 = np.sqrt(VX0)
def sys_lin(EX0, sVX0, A, B, sQW, nsteps=None):
    """Simulate one trajectory of X[k+1] = A X[k] + B W[k].

    X0 ~ N(EX0, sVX0**2), W[k] ~ N(0, sQW**2) i.i.d.
    Generalized: nsteps sets the horizon; the default None keeps the
    original behaviour of reading the module-level kmax.
    Returns an array of length nsteps + 1.
    """
    if nsteps is None:
        nsteps = kmax
    W = sQW*np.random.randn(nsteps)
    X = np.ones(nsteps+1)
    X[0] = EX0+sVX0*np.random.randn()
    for k in range(nsteps):
        X[k+1] = A*X[k]+B*W[k]
    return X
def sys_lin_loi(EX0, sVX0, A, B, sQW, nsteps=None):
    """Exact mean/variance recursion for X[k+1] = A X[k] + B W[k].

    Returns (espX, varX), arrays of length nsteps + 1.

    Bug fixes: (1) varX[0] was never initialized and stayed at the
    np.ones placeholder 1.0 instead of Var(X0) = sVX0**2; (2) the
    variance update used the module-level QW instead of the sQW
    parameter (numerically identical only while QW == sQW**2 globally).
    Generalized: nsteps sets the horizon; None keeps the original
    behaviour of reading the module-level kmax.
    """
    if nsteps is None:
        nsteps = kmax
    espX = np.ones(nsteps+1)
    varX = np.ones(nsteps+1)
    espX[0] = EX0
    varX[0] = sVX0*sVX0  # was missing: Var(X0)
    for k in range(nsteps):
        espX[k+1] = A*espX[k]
        varX[k+1] = A*A*varX[k]+B*B*sQW*sQW
    return espX, varX
# One simulated trajectory against the exact mean +/- 2 std-dev envelope.
X = sys_lin(EX0, sVX0, A, B, sQW)
espX, varX = sys_lin_loi(EX0, sVX0, A, B, sQW)
plt.plot([0, kmax], [0, 0], color="g", linestyle=':')
plt.plot(espX,color='k')
plt.fill_between(range(kmax+1),espX+2*np.sqrt(varX),
                 espX-2*np.sqrt(varX), color = '0.75', alpha=0.4)
plt.plot(X)
plt.show()
from ipywidgets import interact, fixed
def plt_sys_lin(A, B, iseed):
    """Re-seed NumPy and plot one trajectory of the linear system.

    Fixed start X0 = 10 (sVX0 = 0) and unit noise std; A, B are the
    interactively chosen dynamics coefficients.
    """
    np.random.seed(iseed)
    trajectory = sys_lin(10, 0, A, B, 1)
    plt.plot([0, kmax], [0, 0], color="g", linestyle=':')
    plt.plot(trajectory)
    plt.ylim([-4, 15])
    plt.show()
interact(plt_sys_lin, A=(0,1,0.01), B=(0.,6,0.1),
         iseed=(1,100,1))
plt.show()
# Monte-Carlo settings: same dynamics as above, mcmax trajectories.
kmax = 300
mcmax = 300
EX0, VX0 = 10, 5
A, B, QW = 0.9, 1, 0.1
sQW = np.sqrt(QW)
sVX0 = np.sqrt(VX0)
def sys_lin_vec(mcmax, EX0, sVX0, A, B, sQW, nsteps=None):
    """Simulate mcmax independent trajectories of X[k+1] = A X[k] + B W[k].

    Returns an array of shape (nsteps + 1, mcmax), one column per run.

    Bug fix: the initial condition used a single scalar randn(), giving
    every Monte-Carlo trajectory the same starting draw; the runs are
    meant to be independent, so one draw per trajectory is used.
    Generalized: nsteps sets the horizon; None keeps the original
    behaviour of reading the module-level kmax.
    """
    if nsteps is None:
        nsteps = kmax
    W = sQW*np.random.randn(nsteps,mcmax)
    X = np.ones((nsteps+1,mcmax))
    X[0,] = EX0+sVX0*np.random.randn(mcmax)
    for k in range(nsteps):
        X[k+1,] = A*X[k,]+B*W[k,]
    return X
# Plot the Monte-Carlo cloud against the exact envelope, then the
# terminal-time histogram with a larger sample.
X = sys_lin_vec(mcmax, EX0, sVX0, A, B, sQW)
plt.plot(X,alpha=.04,color='b')
plt.plot(espX,color='w')
plt.plot(espX+2*np.sqrt(varX),color='k')
plt.plot(espX-2*np.sqrt(varX),color='k')
plt.show()
mcmax = 10000
X = sys_lin_vec(mcmax, EX0, sVX0, A, B, sQW)
num_bins = 30
# NOTE(review): hist(normed=1) was removed in modern matplotlib;
# density=True is the current spelling — confirm matplotlib version.
n, bins, patches = plt.hist(X[-1,], num_bins, normed=1,
                            facecolor='green', alpha=0.5)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
# State-space model parameters: observation Y[k] = H X[k] + V[k],
# Var(V) = QV.
kmax = 300
EX0, VX0 = 10, 5
A, B, QW = 0.9, 1, 0.1
H, QV = 1, 0.2
sQW = np.sqrt(QW)
sQV = np.sqrt(QV)
sVX0 = np.sqrt(VX0)
def sys_lin_esp_etat(EX0, sVX0, A, B, H, sQW, sQV):
    """Simulate the state-space pair: hidden state X and observation Y.

    X[k+1] = A X[k] + B W[k],  Y[k+1] = H X[k+1] + V[k],
    with X0 ~ N(EX0, sVX0**2). Returns (X, Y), each of length kmax + 1.
    """
    state_noise = sQW*np.random.randn(kmax)
    obs_noise = sQV*np.random.randn(kmax)
    X = np.ones(kmax+1)
    Y = np.ones(kmax+1)
    X[0] = EX0+sVX0*np.random.randn()
    Y[0] = 0  # unused placeholder: the filter only consumes Y[1:]
    for step in range(kmax):
        X[step+1] = A*X[step]+B*state_noise[step]
        Y[step+1] = H*X[step+1]+obs_noise[step]
    return X,Y
def kalman(EX0, sVX0, A, B, H, sQW, sQV, Y):
    """Scalar Kalman filter for X[k+1] = A X[k] + B W[k], Y[k] = H X[k] + V[k].

    Y: observation sequence of length n + 1 (Y[0] is ignored).
    Returns (hatX, R): posterior means and variances, length n + 1.

    Generalized: the horizon is derived from len(Y) instead of the hidden
    module-level kmax, so the filter works for any observation length
    (unchanged for existing callers, whose Y has length kmax + 1).
    """
    n = len(Y) - 1
    hatX = np.ones(n+1)
    R = np.ones(n+1)
    hatX[0] = EX0
    R[0] = sVX0*sVX0
    for k in range(n):
        # prediction
        predX = A*hatX[k]
        predR = A*A*R[k]+B*B*sQW*sQW
        # correction
        gain = predR * H / (H*predR*H+sQV*sQV)
        hatX[k+1] = predX + gain * (Y[k+1]-H*predX)
        R[k+1] = (1-gain*H)*predR
    return hatX, R
# Run the filter on one simulated trajectory and compare the a priori
# (unconditioned) envelope with the a posteriori (filtered) one.
X,Y = sys_lin_esp_etat(EX0, sVX0, A, B, H, sQW, sQV)
espX, varX = sys_lin_loi(EX0, sVX0, A, B, sQW)
hatX, R = kalman(EX0, sVX0, A, B, H, sQW, sQV, Y)
plt.fill_between(range(kmax+1),espX+2*np.sqrt(varX),
                 espX-2*np.sqrt(varX),
                 color = 'g', alpha=0.12,
                 label=r'$\bar X_k\pm 2\,\sqrt{Q^X_k}$ (a priori)')
plt.fill_between(range(kmax+1),hatX+2*np.sqrt(R),
                 hatX-2*np.sqrt(R),
                 color = 'r', alpha=0.12,
                 label=r'$\hat X_k\pm 2\,\sqrt{R_k}$ (a posteriori)')
plt.plot(X,label=r'$X_k$')
plt.plot(espX,color='g',label=r'$\bar X_k$')
plt.plot(hatX,color='r',alpha=0.5,label=r'$\hat X_k$')
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Système linéaire gaussien en tems discret
Step2: Un peu de vectorisation
Step3: Filtrage linéaire gaussien
|
10,357
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
A=np.asarray([[1,1,1], [1,1,2], [1,1,3], [1,1,4]])
B=np.asarray([[0,0,0], [1,0,2], [1,0,3], [1,0,4], [1,1,0], [1,1,1], [1,1,4]])
# Rows of A not present in B: encode each row as a single linear index
# (ravel_multi_index over a grid large enough for both arrays), then keep
# the rows whose index is absent from B's indices.
# NOTE(review): this trick assumes all entries are non-negative integers.
dims = np.maximum(B.max(0),A.max(0))+1
output = A[~np.in1d(np.ravel_multi_index(A.T,dims),np.ravel_multi_index(B.T,dims))]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
10,358
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()  # renders a table of contents for the notebook
import urllib.request as ulib
def get_html(address, source="https://fr.wikipedia.org/wiki/"):
    """Download and return the raw bytes of source + address.

    With the default source, address is a French-Wikipedia page title;
    pass source="" to fetch a full URL directly.
    """
    with ulib.urlopen(source + address) as page:
        return page.read()
get_html("http://www.xavierdupre.fr", source="")[:100]  # smoke test: first 100 bytes
def get_first_link(soup):
    """Return the target (without the '/wiki/' prefix) of the first
    in-article wiki link, or None when no such link exists.

    Scans the paragraphs of the #bodyContent div and keeps the first <a>
    whose href starts with '/wiki/' and contains no ':' (which skips
    namespace links such as File:, Category:, ...).

    Bug fix: anchors without an href made a.get('href') return None and
    the original crashed on None.startswith; such anchors are now skipped.
    """
    for paragraph in soup.find('div',{'id':'bodyContent'}).findAll('p'):
        for anchor in paragraph.findAll('a'):
            href = anchor.get('href')
            if href and href.startswith('/wiki/') and ":" not in href:
                return href[6:]
from bs4 import BeautifulSoup
# Smoke test: first in-article link of the French "Python (langage)" page.
stru = BeautifulSoup(get_html("Python_(langage)"), "lxml")
get_first_link(stru)
def get_to_philosophy(initial_address, max_iterations=100, verbose=False):
    """Follow the first in-article wiki link page after page, looking for
    the 'Philosophie' page.

    Prints a message (in French) when the target page or a cycle is
    reached and returns None; returns a string when max_iterations pages
    were visited without terminating (also reached when a page has no
    usable first link).
    """
    target_page = "Philosophie"
    visited = []
    current_address = initial_address
    if verbose:
        print("\ninitial address: " + current_address+"\n Will you go to "+target_page+" ?...\n")
    for iteration in range(max_iterations):
        current_address = get_first_link(BeautifulSoup(get_html(current_address), "lxml"))
        if current_address is None:
            break  # dead end: no in-article link on this page
        if verbose:
            print(current_address)
        if current_address in visited:
            # Cycle detected: report its length and starting offset.
            print("Boucle de " + str(iteration - visited.index(current_address)) +
                  " noeuds trouvée à partir de "+str(visited.index(current_address))+" itérations")
            return
        if current_address.lower() == target_page.lower():
            print(str(iteration) + " itérations pour arriver à la page Philosophie")
            return
        visited.append(current_address)
    return str(max_iterations)+" itérations atteintes"
get_to_philosophy("Python_(langage)", verbose=True)  # demo run, prints each hop
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solution de Félix Revert.
Step2: Exercice 2
|
10,359
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, pi, erf
import scipy.stats as ss
# Demo: ranks of 10 uniform random numbers.
d = np.random.rand(10)
print (d)
print (ss.rankdata(d))
import pandas as pd
data = pd.read_csv('grades.csv')
#get some info:
data.info()
# Normalize every column by its first row (perfect scores), as percentages.
data /= data.iloc[0,:]
data *= 100
data.mean(axis=0)
plt.title('HW1')
plt.hist(data.HW1)
plt.show()
plt.title('HW2')
plt.hist(data.HW2)
plt.show()
# Unpaired (rank-sum) vs paired (Wilcoxon signed-rank) comparisons.
ss.ranksums(data['HW1'], data['HW2'])
ss.ranksums(data['HW1'], data['HW5'])
ss.wilcoxon(data.HW1, data.HW2)
ss.wilcoxon(data.HW1, data.HW5)
#build a list of all the HW indices
index = []
for i in range(1,7):
    index.append('HW{}'.format(i))
#access those homeworks and then take the mean along the columns
hw_means = data[index].mean(axis=1)
plt.plot(hw_means, data.Midterm, 'o')
plt.show()
# Rank (Spearman) and linear (Pearson) correlation of HW mean vs midterm.
ss.spearmanr(hw_means, data.Midterm)
np.corrcoef(hw_means, data.Midterm)
# One-sided Poisson tail test: P(X > 14) with rate 6.3.
print('p-value is', (1 - ss.poisson.cdf(14, mu=6.3)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Detour
Step2: Nonparametric Methods for Comparing Sets of Measurements
Step3: I'm going to standardize the homework so that they are all out of 100%. The first row contains perfect scores on each.
Step4: The $p$-value is 0.70, so we cannot rule out the null hypothesis that they are from the same distribution. What about a more recent difficult homework?
Step5: So HW 1 and HW 5 were significantly different.
Step6: Notice that the p-values are lower relative to the unmatched sum of ranks test, meaning have paired data allows us to be more certain in our conclusions.
Step7: Remarkable!
Step8: Tests For Counts (Parametric)
|
10,360
|
<ASSISTANT_TASK:>
Python Code:
# Scrape the cat-cafe demo page into two lists of dicts, then connect to
# a local PostgreSQL database via pg8000.
from bs4 import BeautifulSoup
from urllib.request import urlopen
html = urlopen("http://static.decontextualize.com/cats.html").read()
document = BeautifulSoup(html, "html.parser")
cafe_list = list()
cafe_table = document.find('table', {'class': 'cafe-list'})
tbody = cafe_table.find('tbody')
for tr_tag in tbody.find_all('tr'):
    cafe_dict = {}
    cafe_dict['name'] = str(tr_tag.find('td', {'class': 'name'}).string)
    cafe_dict['zip'] = str(tr_tag.find('td', {'class': 'zip'}).string)
    cafe_list.append(cafe_dict)
cafe_list
cat_list = list()
cat_table = document.find('table', {'class': 'cat-list'})
tbody = cat_table.find('tbody')
for tr_tag in tbody.find_all('tr'):
    cat_dict = {}
    cat_dict['name'] = str(tr_tag.find('td', {'class': 'name'}).string)
    cat_dict['birthdate'] = str(tr_tag.find('td', {'class': 'birthdate'}).string)
    cat_dict['weight'] = float(tr_tag.find('td', {'class': 'weight'}).string)
    cat_dict['color'] = str(tr_tag.find('td', {'class': 'color'}).string)
    cat_dict['pattern'] = str(tr_tag.find('td', {'class': 'pattern'}).string)
    # A '*' suffix on a location marks the cafe where the cat is active.
    cat_dict['locations'] = str(tr_tag.find('td', {'class': 'locations'}).string).split(', ')
    cat_list.append(cat_dict)
cat_list
!pip install pg8000
import pg8000
conn = pg8000.connect(database="catcafes", user="rebeccaschuetz")
conn.rollback()
cursor = conn.cursor()
cursor.execute(
CREATE TABLE cafe (
id serial,
name varchar(40),
zip varchar(5)
)
)
cursor.execute(
CREATE TABLE cat (
id serial,
name varchar(20),
birthdate varchar(10),
weight numeric,
color varchar(20),
pattern varchar(20)
)
)
cursor.execute(
CREATE TABLE cat_cafe (
cat_id int,
cafe_id int,
active boolean
)
)
conn.commit()
cafe_name_id_map = {}
for item in cafe_list:
cursor.execute("INSERT INTO cafe (name, zip) VALUES (%s, %s) RETURNING id",
[str(item['name']), str(item['zip'])])
rowid = cursor.fetchone()[0]
cafe_name_id_map[str(item['name'])] = rowid
conn.commit()
cafe_name_id_map
for cat in cat_list:
for location in cat['locations']:
if location.endswith('*'):
print(location)
for kitten in cat_list:
cursor.execute("INSERT INTO cat (name, birthdate, weight, color, pattern) VALUES (%s, %s, %s, %s, %s) RETURNING id",
[kitten['name'], kitten['birthdate'], kitten['weight'], kitten['color'], kitten['pattern']])
rowid = cursor.fetchone()[0]
for location in kitten['locations']:
if location.endswith('*'):
active_val = True
cafe_id_val = cafe_name_id_map[location.replace('*','')]
else:
active_val = False
cafe_id_val = cafe_name_id_map[location]
cursor.execute("INSERT INTO cat_cafe (cat_id, cafe_id, active) VALUES (%s, %s, %s)",
[rowid, cafe_id_val, active_val])
conn.commit()
cursor.execute("SELECT max(birthdate) FROM cat")
birthdate = cursor.fetchone()[0]
cursor.execute("SELECT name FROM cat WHERE birthdate = %s", [birthdate])
print(cursor.fetchone()[0])
cursor.execute(SELECT DISTINCT(cafe.zip)
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat.color = 'lilac' AND cat.pattern = 'tabby' AND cat_cafe.active = true
)
print(', '.join([x[0] for x in cursor.fetchall()]))
cursor.execute(
SELECT cafe.name, avg(cat.weight)
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat_cafe.active = true
GROUP BY cafe.name
)
for rec in cursor.fetchall():
print(rec[0]+":", "%0.2f" % rec[1])
cursor.execute(
SELECT cafe.name
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat_cafe.active = true AND cat.pattern = 'tortoiseshell'
GROUP BY cafe.name
ORDER BY count(cat.name) DESC
LIMIT 1
)
print(cursor.fetchone()[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's tackle the list of cafes first. In the cell below, write some code that creates a list of dictionaries with information about each cafe, assigning it to the variable cafe_list. I've written some of the code for you; you just need to fill in the rest. The list should end up looking like this
Step2: Great! In the following cell, write some code that creates a list of cats from the <table> tag on the page, storing them as a list of dictionaries in a variable called cat_list. Again, I've written a bit of the code for you. Expected output
Step3: Problem set #2
Step4: Here's a cell you can run if something goes wrong and you need to rollback the current query session
Step8: In the cell below, you're going to create three tables, necessary to represent the data you scraped above. I've given the basic framework of the Python code and SQL statements to create these tables. I've given the entire CREATE TABLE statement for the cafe table, but for the other two, you'll need to supply the field names and the data types for each column. If you're unsure what to call the fields, or what fields should be in the tables, consult the queries in "The Queries" below. Hints
Step9: After executing the above cell, issuing a \d command in psql should yield something that looks like the following
Step10: Issuing SELECT * FROM cafe in the psql client should yield something that looks like this
Step11: The dictionary maps the name of the cat cafe to its ID in the database. You'll need these values later when you're adding records to the linking table (cat_cafe).
Step12: Issuing a SELECT * FROM cat LIMIT 10 in psql should yield something that looks like this
Step14: In which zip codes can I find a lilac-colored tabby?
Step16: What's the average weight of cats currently residing at all locations?
Step18: Which location has the most cats with tortoiseshell coats?
|
10,361
|
<ASSISTANT_TASK:>
Python Code:
lambda x,y : x%y
length_func = lambda x: len(x)
length_func("hello, there!")
# Even or Odd lambda
even_odd = lambda x: x % 2 == 0
even_odd = lambda x: True if x % 2 == 0 else False
even_odd(9)
# script that "converts" a tuple to a list
some_tup = ("[", 3, 4, "hello", "]")
x = lambda tup: ", ".join(str(item) for item in tup)
print(x(some_tup))
sorted([4, 2, 8, 5, 2, 9])
sorted([4, 2, 8, 5, 2, 9], key=lambda x: x%2 == 0)
sorted([1, 5, 2, 5, 2, 9, 4], reverse=True)
lst = [1, 3, 4,5]
lst[::-1]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As seen, when we assign a lambda expression to a label we can use it. Note that at default lambda expressions return the expected type of whatever it is handling. If you send in a number and do a numerical operation, you will receive back a number, a string a string, etc.
Step2: As we can see, the lambda expressions returns the number we expected. How do we return a True or False value? Note
Step3: Soon, we will learn about creating our own classes, and eventually data structures. When we learn how to make our own data structures, we'll rewrite what are "conventionally" (take it lightly) known as a "magic methods". These methods are not called upon explicitly, but are triggered by some internal action that Python sees you carried out. For example the + in Python triggers the magic method
Step4: Somewhat scrappy, but let's just say it's pretty close to looking like an actual list. Strings are not mutable so we can't just add a bracket at the beginning or right at the end. There are certain ways to make this possible, however. Operations like this are better suited for functions, anyways.
Step5: However, we have much more power with the key. The key accepts a form of some type of filter. We can send in a lambda expression to change what sorted means to us. What if we wanted the even numbers to be at the end?
Step6: What if we wanted the numbers to be reversed, such as in descending order?
|
10,362
|
<ASSISTANT_TASK:>
Python Code:
a = Table()
a.meta['dt'] = 0.0001 # time step, in seconds
a.meta['duration'] = 200 # length of time, in seconds
a.meta['omega'] = 2*np.pi # angular frequency, in radians
a.meta['phi'] = 0.0 # offset angle, in radians
freq = fftpack.fftfreq(len(a), d=a.meta['dt'])
nyq_ind = int(len(a)/2.) # the index of the last positive Fourier frequency
freq = fftpack.fftfreq(len(psd), d=dt)
nyq_ind = int(len(psd)/2.) # the index of the last positive Fourier frequency
def make_lc(events, start, end, dt):
time = np.asarray(j1535['TIME']) ## Doing this so that we can re-run
seg_length = 32. # seconds
dt = 1./128.# seconds
n_bins = int(seg_length/dt) # Number of time bins in a segment of light curve
psd_avg = np.zeros(n_bins) # initiating, to keep running sum (then avearge at end)
n_seg = 0
for (start_gti, stop_gti) in zip(gti_tab['START'], gti_tab['STOP']):
start_time = start_gti
end_time = start_time + seg_length
while end_time <= stop_gti:
## Make a mask of events in this segment
## Keep the stuff not in this segment for next time
## Make the light curve
## Turn that into a power spectrum
## Keep a running sum (to average at end)
## Print out progress
if n_seg % 5 == 0:
print(n_seg)
## Incrementing for next loop
n_seg += 1
start_time += seg_length
end_time += seg_length
## Divide summed powers by n_seg to get the average
def rebin(freq, power, err_power, rebin_factor=1.05):
Re-bin the power spectrum in frequency space by some re-binning factor
(rebin_factor > 1). This is sometimes called 'geometric re-binning' or
'logarithmic re-binning', as opposed to linear re-binning
(e.g., grouping by 2)
Parameters
----------
freq : np.array of floats
1-D array of the Fourier frequencies.
power : np.array of floats
1-D array of the power at each Fourier frequency, with any/arbitrary
normalization.
err_power : np.array of floats
1-D array of the error on the power at each Fourier frequency, with the
same normalization as the power.
rebin_factor : float
The factor by which the data are geometrically re-binned.
Returns
-------
rb_freq : np.array of floats
1-D array of the re-binned Fourier frequencies.
rb_power : np.array of floats
1-D array of the power at the re-binned Fourier frequencies, with the
same normalization as the input power array.
rb_err : np.array of floats
1-D array of the error on the power at the re-binned Fourier
frequencies, with the same normalization as the input error on power.
assert rebin_factor >= 1.0
rb_power = np.asarray([]) # Array of re-binned power
rb_freq = np.asarray([]) # Array of re-binned frequencies
rb_err = np.asarray([]) # Array of error in re-binned power
real_index = 1.0 # The unrounded next index in power
int_index = 1 # The int of real_index, added to current_m every iteration
current_m = 1 # Current index in power
prev_m = 0 # Previous index m
## Loop through the length of the array power, new bin by new bin, to
## compute the average power and frequency of that new geometric bin.
while current_m < len(power):
return rb_freq, rb_power, rb_err
seg_length = 64. # seconds
dt = 1./8.# seconds
n_bins = int(seg_length/dt) # Number of time bins in a segment of light curve
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1a. Compute the time steps and a cosine harmonic with the above-defined properties.
Step2: 1e. Plot them!
Step3: Plot it!
Step4: 2c.ii. Sometimes, the detector is on and recording photons, but it's pointed too close to the Earth, or a structure on the spacecraft is occulting part of the view, or the instrument is moving through a zone of high particle background, or other things. The times when these things happen are recorded, and in data reduction you make a list of Good Time Intervals, or GTIs, which is when you can use good science data. I made a list of GTIs for this data file that are longer than 4 seconds long, which you can read in from "J1535_gti.fits".
Step6: Plot it! Use similar code I gave you above to make the array of Fourier frequencies and get the index of the Nyquist frequency.
Step7: Apply this to the data (using JUST the frequency, power, and error at positive Fourier frequencies). Start with a rebin factor of 1.03.
|
10,363
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import netCDF4
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.basemap import Basemap
datadir = './datafiles/'
datafile = 'GL_TS_DC_2300691.nc'
with netCDF4.Dataset(datadir + datafile) as nc:
lon = nc.variables['LONGITUDE'][:]
lat = nc.variables['LATITUDE'][:]
mpl.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(111)
plt.plot(lon, lat, 'ko', ms=1)
plt.text(lon[0], lat[0], 'Start', fontsize=16)
plt.text(lon[-1], lat[-1], 'End', fontsize=16)
plt.show()
lonmin, lonmax = ax.get_xlim()
latmin, latmax = ax.get_ylim()
m = Basemap(projection='merc', llcrnrlat=latmin, urcrnrlat=latmax,
llcrnrlon=lonmin-5, urcrnrlon=lonmax+5, lat_ts=20, resolution='i')
lon2, lat2 = m(lon, lat)
fig = plt.figure(figsize=(8,8))
m.plot(lon2, lat2, 'ko', ms=1)
m.drawcoastlines(linewidth=0.5, zorder=3)
m.fillcontinents(zorder=2)
m.drawparallels(np.arange(-90.,91.,5.), labels=[1,0,0,0], zorder=1)
m.drawmeridians(np.arange(-180.,181.,5.), labels=[0,1,0,0], zorder=1)
plt.show()
with netCDF4.Dataset(datadir + datafile) as nc:
temperature = nc.variables['TEMP'][:,1]
print temperature.shape
cmap = plt.cm.Spectral_r
norm = colors.Normalize(vmin=26, vmax=29)
fig = plt.figure(figsize=(8,8))
m.scatter(lon2, lat2, s=10, c=temperature, edgecolor='None', cmap=cmap, norm=norm)
m.drawcoastlines(linewidth=0.5, zorder=3)
m.fillcontinents(zorder=2)
m.drawparallels(np.arange(-90.,91.,5.), labels=[1,0,0,0], zorder=1)
m.drawmeridians(np.arange(-180.,181.,5.), labels=[0,0,1,0], zorder=1)
plt.colorbar(extend='both', cmap=cmap, norm=norm)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data reading
Step2: We extract only the spatial coordinates
Step3: Basic plot
Step4: We will also indicate Start and End labels at the corresponding locations.
Step5: The figure can be improved by adding the landmask and coastline.
Step6: then we create the projection, slightly enlarging the longitude extension
Step7: The new figure is generated similarly to the previous one
Step8: Scatter plot
Step9: For the plot, we will keep the same projection.<br/>
Step10: We specify the coordinates and the temperature as the arguments of the scatter plot
|
10,364
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import scipy.linalg as la
def signal(t):
return 1-(t-2)**2 if (t<3 and t>1) else 0
num_samples = rows = cols = 2**12
time_list = linspace(0.0001,4,num_samples)
signal_list = [signal(time) for time in time_list]
plot(time_list,signal_list)
def DFT(x,inverse=False):
x = asarray(x,dtype=float)
n = arange(num_samples)
k = n.reshape((num_samples,1))
mat = exp(2j * pi * k *n/num_samples)
return dot(la.inv(mat),x) if inverse else dot(mat,x)
g = DFT(signal_list)
plot(absolute(g))
f = DFT(g,inverse=True)
plot(time_list,f.real)
g[0] = g[-1] = 0
plot(absolute(g))
f = DFT(g,inverse=True)
plot(time_list,f.real)
plot(time_list,signal_list)
def FFT(x):
x = asarray(x,dtype=float)
N = x.shape[0]
if N == 1:
return x
else:
X_even = FFT(x[0::2])
X_odd = FFT(x[1::2])
factor = exp(-2j * pi * arange(N) / N)
return concatenate([X_even + factor[:N / 2] * X_odd, X_even + factor[N / 2:] * X_odd])
h = FFT(signal_list)
plot(absolute(h))
%timeit DFT(signal_list)
%timeit FFT(signal_list)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating the signal
Step2: Now we create the function that will perform the discrete fourier transform.
Step3: A look into the mathematical treatment of the DFT should convince you that this is the way to go.
Step4: Regenerating the signal
Step5: This verifies that the DFT we obtained was correct.
Step6: Regenerating the modified signal
Step7: Notice that the amplitude of the regenerated signal is a lot less than the original signal. This is attributed to the fact that when you take away the contributions due to certian frequencies, a lesser number of harmonics generate the new signal and this signal has a lower amplitude.
Step8: We intend to use recursion as our friend in this one. So we'll define a function which implements this recursion. Why we define the function this particular way will be clear if you look at the mathematical treatment for the optimization of the FFT.
Step9: Taking the Fast Fourier Transform
Step10: Time Comparison
Step11: Fast Fourier Transform
|
10,365
|
<ASSISTANT_TASK:>
Python Code:
import google.datalab.bigquery as bq
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn import cross_validation as cv
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics import mean_squared_error
from math import sqrt
%%sql
select * from [fiery-set-171213:vrec.sam_exclusions] limit 5
%%sql
select Exclusion_Type from [fiery-set-171213:vrec.sam_exclusions] group by 1;
%%sql
select Classification from [fiery-set-171213:vrec.sam_exclusions] group by 1;
%%sql
select
count(*)
from [fiery-set-171213:vrec.sam_exclusions]
where Classification in ('Firm')
;
%%bq query -n df_query
select
EXTRACT(YEAR FROM Active_Date) as year,
count(*) as count
from `fiery-set-171213.vrec.sam_exclusions`
where Classification in ('Firm')
and Active_Date is not NULL
group by 1
order by 1;
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.plot(kind='bar', x='year', title='Excluded Firms per year', figsize=(15,8))
ax.set_xlabel('Year')
ax.set_ylabel('count')
%%bq query
select
#Name,
SAM_Number,
count(*) as count
from `fiery-set-171213.vrec.sam_exclusions`
where Classification in ('Firm')
#and Active_Date is not NULL
group by 1
order by 2 DESC
limit 5;
%%bq query
select
NPI,
count(*) as count
from `fiery-set-171213.vrec.sam_exclusions`
where Classification in ('Firm')
#and CAGE is not NULL
group by 1
order by 2 DESC
limit 5;
%%bq query
select
CAGE,
count(*) as count
from `fiery-set-171213.vrec.sam_exclusions`
where Classification in ('Firm')
#and CAGE is not NULL
group by 1
order by 2 DESC
limit 5;
%%bq query
select *
from `fiery-set-171213.vrec.fapiis`
limit 5
%%bq query -n df_query
select
EXTRACT(YEAR FROM RECORD_DATE) as year,
count(*) as count
from `fiery-set-171213.vrec.fapiis`
group by 1
order by 1;
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.plot(kind='bar', x='year', title='Firms by Record date', figsize=(10,5))
ax.set_xlabel('Year')
ax.set_ylabel('count')
%%bq query -n df_query
select
EXTRACT(YEAR FROM TERMINATION_DATE) as year,
count(*) as count
from `fiery-set-171213.vrec.fapiis`
group by 1
order by 1;
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.plot(kind='bar', x='year', title='Firms by termination date', figsize=(10,5))
ax.set_xlabel('Year')
ax.set_ylabel('count')
%%bq query
select
AWARDEE_NAME,
DUNS,
count(*) as count
from `fiery-set-171213.vrec.fapiis`
group by 1,2
order by 3 DESC
limit 5;
%%bq query
select
*
from `fiery-set-171213.vrec.fapiis`
where AWARDEE_NAME in ('ALPHA RAPID ENGINEERING SOLUTIONS')
limit 5;
%%bq query
select
RECORD_TYPE,
count(*) as count
from `fiery-set-171213.vrec.fapiis`
group by 1
order by 2 DESC
%%bq query -n df_query
select count(*) as transactions
from `fiery-set-171213.vrec.usa_spending_all`
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
df.head()
%%bq query
select *
from `fiery-set-171213.vrec.usa_spending_all`
where mod_agency in ('1700: DEPT OF THE NAVY')
limit 5
%%bq query -n df_query
select
#substr(signeddate, 1, 2) month,
fiscal_year as year,
count(*) transactions,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
group by year
order by year asc
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.set_index('year')['dollarsobligated'].plot(kind='bar', title='Government purchases by years')
ax.set_ylabel('dollars obligated')
%%bq query -n df_query
select
fiscal_year as year,
sum(dollarsobligated)/count(*) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
group by year
order by year asc
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.set_index('year')['dollarsobligated'].plot(kind='bar', title='avg. transaction size by years')
ax.set_ylabel('dollars obligated')
%%bq query
select
maj_agency_cat,
mod_agency,
count(*)
from `fiery-set-171213.vrec.usa_spending_all`
group by 1,2
order by 3 DESC
limit 20
%%bq query
select
mod_parent,
vendorname,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
group by 1,2
order by 3 DESC
limit 20
%%bq query
select
productorservicecode,
systemequipmentcode,
claimantprogramcode,
principalnaicscode,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where vendorname in ('LOCKHEED MARTIN CORPORATION')
group by 1,2,3,4
order by 5 DESC
limit 20
%%bq query
select
#mod_parent,
vendorname,
systemequipmentcode,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where productorservicecode in ('1510: AIRCRAFT, FIXED WING')
group by 1,2
order by 3 DESC
limit 20
%%bq query
select
vendorname,
systemequipmentcode,
claimantprogramcode,
principalnaicscode,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where productorservicecode in ('1510: AIRCRAFT, FIXED WING')
and contractingofficerbusinesssizedetermination in ('S: SMALL BUSINESS')
group by 1,2,3,4
order by dollarsobligated DESC
limit 20
%%bq query
select
*
from `gpqueries.contracts.raw`
where productorservicecode in ('1510: AIRCRAFT, FIXED WING')
and contractingofficerbusinesssizedetermination in ('S: SMALL BUSINESS')
limit 1
%%bq query
select
claimantprogramcode,
principalnaicscode,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
group by 1,2
order by dollarsobligated DESC
limit 10
%%bq query -n df_query
select
fiscal_year,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
group by 1
order by 1
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
plt = df.set_index('fiscal_year')['dollarsobligated'].plot(kind='bar', title='transactions amount for SMBs')
%%bq query -n df_query
#%%sql
select
smb.fiscal_year,
sum(smb.transaction) as smb,
sum(total.transaction) as total,
sum(smb.transaction)/sum(total.transaction) as percentage
from
(select
fiscal_year,
sum(dollarsobligated) as transaction
from `fiery-set-171213.vrec.usa_spending_all`
where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
group by 1) as smb
join
(select
fiscal_year,
sum(dollarsobligated) as transaction
from `fiery-set-171213.vrec.usa_spending_all`
group by 1) as total
on smb.fiscal_year = total.fiscal_year
group by 1
order by 1
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
plt = df.set_index('fiscal_year')['percentage'].plot(kind='bar', title='dollars % for SMBs')
%%bq query
select
smb.principalnaicscode as principalnaicscode,
sum(total.count) as count,
sum(smb.dollarsobligated) as dollarsobligated_smb,
sum(total.dollarsobligated) as dollarsobligated_total,
sum(smb.dollarsobligated)/sum(total.dollarsobligated) as smb_percentage
from
(select
principalnaicscode,
count(*) as count,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
group by 1) as smb
join
(select
principalnaicscode,
count(*) as count,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
group by 1
having dollarsobligated > 0
) as total
on smb.principalnaicscode = total.principalnaicscode
group by 1
order by 5 DESC
limit 10
%%bq query -n df_query
select
maj_agency_cat,
#mod_agency,
#contractactiontype,
#typeofcontractpricing,
#performancebasedservicecontract,
state,
#vendorcountrycode,
#principalnaicscode,
contractingofficerbusinesssizedetermination,
#sum(dollarsobligated) as dollarsobligated
dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where vendorcountrycode in ('UNITED STATES', 'USA: UNITED STATES OF AMERICA')
and contractingofficerbusinesssizedetermination in ('O: OTHER THAN SMALL BUSINESS', 'S: SMALL BUSINESS')
and dollarsobligated > 0
#group by 1,2,3
limit 20000
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
df.head()
# Create dummy variable using pandas function get_dummies
df1 = df.join(pd.get_dummies(df['maj_agency_cat']))
df1 = df1.join(pd.get_dummies(df['state']))
df1 = df1.join(pd.get_dummies(df['contractingofficerbusinesssizedetermination']))
df1 = df1.drop('maj_agency_cat', axis = 1)
df1 = df1.drop('state', axis = 1)
df1 = df1.drop('contractingofficerbusinesssizedetermination', axis = 1)
df1.head()
train_data = df1.iloc[:,1:]
train_labels = df[['dollarsobligated']]
lm = LinearRegression()
lm.fit(train_data, train_labels)
# The coefficients
print('Coefficients: \n', lm.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((lm.predict(train_data) - train_labels) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % lm.score(train_data, train_labels))
%%bq query -n df_query
select
vendorname,
maj_agency_cat,
state,
contractingofficerbusinesssizedetermination,
count(*) as count,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where vendorcountrycode in ('UNITED STATES', 'USA: UNITED STATES OF AMERICA')
and contractingofficerbusinesssizedetermination in ('O: OTHER THAN SMALL BUSINESS', 'S: SMALL BUSINESS')
and dollarsobligated > 0
group by 1,2,3,4
limit 20000
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
df.head()
#Create dummy variable using pandas function get_dummies
df1 = df.join(pd.get_dummies(df['maj_agency_cat']))
df1 = df1.join(pd.get_dummies(df['state']))
df1 = df1.join(pd.get_dummies(df['contractingofficerbusinesssizedetermination']))
df1 = df1.drop('maj_agency_cat', axis = 1)
df1 = df1.drop('state', axis = 1)
df1 = df1.drop('contractingofficerbusinesssizedetermination', axis = 1)
df1 = df1.drop('vendorname', axis = 1)
df1 = df1.drop('dollarsobligated', axis = 1)
train_data = df1.iloc[:,1:]
train_labels = df[['count']]
lm = LinearRegression()
lm.fit(train_data, train_labels)
# The coefficients
print('Coefficients: \n', lm.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((lm.predict(train_data) - train_labels) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % lm.score(train_data, train_labels))
%%bq query
select
#principalnaicscode,
fiscal_year,
maj_agency_cat,
#contractingofficerbusinesssizedetermination,
#vendorname,
productorservicecode,
count(*) as count,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
#where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
#where regexp_contains(principalnaicscode, "CONSTRUCTION")
#and regexp_contains(maj_agency_cat, "AGRICULTURE")
where regexp_contains(productorservicecode, "MEAT")
#and fiscal_year = 2016
group by 1,2,3
order by dollarsobligated DESC
limit 10
%%bq query -n df_query
select
contractingofficerbusinesssizedetermination,
mod_agency,
vendorname,
count(*) as count
from `fiery-set-171213.vrec.usa_spending_all`
where vendorcountrycode in ('UNITED STATES', 'USA: UNITED STATES OF AMERICA')
and contractingofficerbusinesssizedetermination in ('O: OTHER THAN SMALL BUSINESS', 'S: SMALL BUSINESS')
and mod_agency not in ("")
group by 1,2,3
order by count DESC
limit 20000
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
df.head()
df1 = df.drop('contractingofficerbusinesssizedetermination', axis = 1)
n_agency = df1.mod_agency.unique().shape[0]
n_vendors = df1.vendorname.unique().shape[0]
print 'Number of gov agency = ' + str(n_agency) + ' | Number of vendors = ' + str(n_vendors)
# Convert categorial value with label encoding
le_agency = LabelEncoder()
label_agency = le_agency.fit_transform(df1['mod_agency'])
le_vendor = LabelEncoder()
label_vendor = le_vendor.fit_transform(df1['vendorname'])
df_agency = pd.DataFrame(label_agency)
df_vendor = pd.DataFrame(label_vendor)
df2 = pd.concat([df_agency, df_vendor], axis = 1)
df2 = pd.concat([df2, df1['count']], axis = 1)
df2.columns = ['mod_agency', 'vendorname', 'count']
df2.head(5)
# To ge the right label back
# le_agency.inverse_transform([173, 100])
# Split into training and test data set
train_data, test_data = cv.train_test_split(df2, test_size=0.25)
#Build the matrix
train_data_matrix = np.zeros((n_agency, n_vendors))
for line in train_data.itertuples():
train_data_matrix[line[1]-1, line[2]-1] = line[3]
test_data_matrix = np.zeros((n_agency, n_vendors))
for line in test_data.itertuples():
test_data_matrix[line[1]-1, line[2]-1] = line[3]
#Compute cosine distance
user_similarity = pairwise_distances(train_data_matrix, metric='cosine')
item_similarity = pairwise_distances(train_data_matrix.T, metric='cosine')
def predict(ratings, similarity, type='user'):
if type == 'user':
mean_user_rating = ratings.mean(axis=1)
#You use np.newaxis so that mean_user_rating has same format as ratings
ratings_diff = (ratings - mean_user_rating[:, np.newaxis])
pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
elif type == 'item':
pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
return pred
item_prediction = predict(train_data_matrix, item_similarity, type='item')
user_prediction = predict(train_data_matrix, user_similarity, type='user')
# Evaluation
def rmse(prediction, ground_truth):
prediction = prediction[ground_truth.nonzero()].flatten()
ground_truth = ground_truth[ground_truth.nonzero()].flatten() #filter out all items with no 0 as we only want to predict in the test set
return sqrt(mean_squared_error(prediction, ground_truth))
print 'User-based CF RMSE: ' + str(rmse(user_prediction, test_data_matrix))
print 'Item-based CF RMSE: ' + str(rmse(item_prediction, test_data_matrix))
print 'Worklow 1'
print '=' * 100
print 'Select your agency:'
agency = df1['mod_agency'][1]
print agency
print '=' * 100
print '1. Have you considered working with these SMB companies (user prediction?'
agency = le_agency.transform(agency)
vendor_reco = pd.DataFrame(user_prediction[agency, :])
labels = pd.DataFrame(le_vendor.inverse_transform(range(0, len(vendor_reco))))
df_reco = pd.concat([vendor_reco, labels], axis = 1)
df_reco.columns = ['reco_score', 'vendorname']
#Join to get the SMB list
df_smb = df.drop(['mod_agency', 'count'], axis = 1)
df_reco = df_reco.set_index('vendorname').join(df_smb.set_index('vendorname'))
df_reco = df_reco.sort_values(['reco_score'], ascending = [0])
df_reco[df_reco['contractingofficerbusinesssizedetermination'] == 'S: SMALL BUSINESS'].head(10)
print '=' * 100
print '2. Have you considered working with these SMB companies (item-item prediction?'
vendor_reco = pd.DataFrame(item_prediction[agency, :])
df_reco = pd.concat([vendor_reco, labels], axis = 1)
df_reco.columns = ['reco_score', 'vendorname']
df_reco = df_reco.set_index('vendorname').join(df_smb.set_index('vendorname'))
df_reco = df_reco.sort_values(['reco_score'], ascending = [0])
df_reco[df_reco['contractingofficerbusinesssizedetermination'] == 'S: SMALL BUSINESS'].head(10)
print 'Worklow 2'
print '=' * 100
print 'Select a vendor:'
# Workflow 2 - WIP
# Select a vendor
# Other similar vendor
%%sql
select
substr(productorservicecode, 1, 4) product_id,
first(substr(productorservicecode, 7)) product_name,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
product_id
order by
sum_dollarsobligated desc
limit 10
%%sql
select
substr(agencyid, 1, 4) agency_id,
first(substr(agencyid, 7)) agency_name,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
where
productorservicecode contains 'software'
group by
agency_id
order by
sum_dollarsobligated desc
ignore case
%%sql
select
substr(agencyid, 1, 4) agency_id,
first(substr(agencyid, 7)) agency_name,
substr(principalnaicscode, 1, 6) naics_id,
first(substr(principalnaicscode, 9)) naics_name,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
where
principalnaicscode contains 'software' and
fiscal_year = 2015
group by
agency_id, naics_id
order by
sum_dollarsobligated desc
ignore case
%%sql
select
fiscal_year,
dollarsobligated,
vendorname, city, state, annualrevenue, numberofemployees,
descriptionofcontractrequirement
from
gpqueries:contracts.raw
where
agencyid contains 'transportation security administration' and
principalnaicscode contains 'computer and software stores'
ignore case
%%sql
select
agencyid,
dollarsobligated,
vendorname,
descriptionofcontractrequirement
from
gpqueries:contracts.raw
where
vendorname contains 'tableau' or
vendorname contains 'socrata' or
vendorname contains 'palantir' or
vendorname contains 'revolution analytics' or
vendorname contains 'mathworks' or
vendorname contains 'statacorp' or
vendorname contains 'mathworks'
order by
dollarsobligated desc
limit
100
ignore case
%%sql
select
agencyid,
dollarsobligated,
descriptionofcontractrequirement
from
gpqueries:contracts.raw
where
descriptionofcontractrequirement contains 'body camera'
limit
100
ignore case
%%sql
select
substr(pop_state_code, 1, 2) state_code,
first(substr(pop_state_code, 4)) state_name,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
state_code
order by
sum_dollarsobligated desc
%%sql --module gpq
define query vendor_size_by_agency
select
substr(agencyid, 1, 4) agency_id,
first(substr(agencyid, 7)) agency_name,
nth(11, quantiles(annualrevenue, 21)) vendor_median_annualrevenue,
nth(11, quantiles(numberofemployees, 21)) vendor_median_numberofemployees,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
agency_id
having
transactions > 1000 and
sum_dollarsobligated > 10e6
order by
vendor_median_annualrevenue asc
bq.Query(gpq.vendor_size_by_agency).to_dataframe()
%%sql
select
womenownedflag,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
womenownedflag
%%sql
select
womenownedflag, veteranownedflag, minorityownedbusinessflag,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
womenownedflag, veteranownedflag, minorityownedbusinessflag
order by
womenownedflag, veteranownedflag, minorityownedbusinessflag desc
%%sql
select
sum(if(before2015.dunsnumber is null, 1, 0)) new_vendors,
sum(if(before2015.dunsnumber is null, 0, 1)) old_vendors
from
flatten((select unique(dunsnumber) dunsnumber from gpqueries:contracts.raw where fiscal_year = 2015), dunsnumber) in2015
left join
flatten((select unique(dunsnumber) dunsnumber from gpqueries:contracts.raw where fiscal_year < 2015), dunsnumber) before2015
on before2015.dunsnumber = in2015.dunsnumber
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SAM (System for Award Management) - exclusions
Step2: There are 8,659 firms on the SAM exclusion list
Step3: NPI and CAGE don't seem to be great keys to join the data - ideally we can use SAM
Step4: FAPIIS is not bad with 3002 DUNS code but time range goes only from 2012 to 2017
Step5: Which means we're dealing with 49.5M transactions totalling 6.7 trillion dollars. These purchases came from 622k vendors that won 2.2mn solicitations issued by government agencies.
Step6: Understanding where the budget is spent
Step7: Looking at SMBs by year
Step8: SMB contract by gov. agency & by naics code
Step9: Simple Linear regression (LR)
Step10: LR
Step11: MVP
Step12: MVP 2 - Collaborative filtering
Step13: Workflow 1
Step14: OTHERS - FROM TUTORIAL - Anton Tarasenko
Step15: You can find agencies that buy products like yours. If it's "software"
Step16: What Firms in Your Industry Sell to the Government
Step17: Inspecting Specific Transactions
Step18: Alternatively, specify vendors your product relates to and check how the government uses it. Top deals in data analytics
Step19: Searching Through Descriptions
Step20: Some rows of descriptionofcontractrequirement contain codes like "IGF
Step21: Facts about Government Contracting
Step22: The median shows the most likely supplier. Agencies on the top of the table actively employ vendors whose annual revenue is less than $1mn.
Step23: Women-owned businesses make about one tenth of the transactions, but their share in terms of sales is only 3.7%.
Step24: For example, firms owned by women, veterans, and minorities (all represented at the same time) sell $5bn in goods and services. That's 0.07% of all government purchases.
|
10,366
|
<ASSISTANT_TASK:>
Python Code:
from urllib.request import urlretrieve
from os.path import isfile
from tqdm import tqdm
class DLProgress(tqdm):
    """tqdm progress bar adapter for urllib.request.urlretrieve's reporthook."""
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """reporthook-compatible callback: advance the bar by newly fetched bytes."""
        self.total = total_size
        new_blocks = block_num - self.last_block
        self.update(new_blocks * block_size)
        self.last_block = block_num
if not isfile('train.p'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Train Dataset') as pbar:
urlretrieve(
'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/train.p',
'train.p',
pbar.hook)
if not isfile('test.p'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Test Dataset') as pbar:
urlretrieve(
'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/test.p',
'test.p',
pbar.hook)
print('Training and Test data downloaded.')
import pickle
import numpy as np
import math
# Fix error with TF and Keras
import tensorflow as tf
tf.python.control_flow_ops = tf
print('Modules loaded.')
with open('train.p', 'rb') as f:
data = pickle.load(f)
# TODO: Load the feature data to the variable X_train
X_train = data['features']
# TODO: Load the label data to the variable y_train
y_train = data['labels']
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert np.array_equal(X_train, data['features']), 'X_train not set to data[\'features\'].'
assert np.array_equal(y_train, data['labels']), 'y_train not set to data[\'labels\'].'
print('Tests passed.')
# TODO: Shuffle the data
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert X_train.shape == data['features'].shape, 'X_train has changed shape. The shape shouldn\'t change when shuffling.'
assert y_train.shape == data['labels'].shape, 'y_train has changed shape. The shape shouldn\'t change when shuffling.'
assert not np.array_equal(X_train, data['features']), 'X_train not shuffled.'
assert not np.array_equal(y_train, data['labels']), 'y_train not shuffled.'
print('Tests passed.')
# TODO: Normalize the data features to the variable X_normalized
def normalize(image_data):
    """Min-max scale 8-bit pixel values from [0, 255] into [-0.5, 0.5]."""
    low, high = -0.5, 0.5
    color_floor, color_ceil = 0.0, 255.0  # full 8-bit colour range
    return low + ((image_data - color_floor) * (high - low)) / (color_ceil - color_floor)
X_normalized = normalize(X_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert math.isclose(np.min(X_normalized), -0.5, abs_tol=1e-5) and math.isclose(np.max(X_normalized), 0.5, abs_tol=1e-5), 'The range of the training data is: {} to {}. It must be -0.5 to 0.5'.format(np.min(X_normalized), np.max(X_normalized))
print('Tests passed.')
# TODO: One Hot encode the labels to the variable y_one_hot
from sklearn.preprocessing import LabelBinarizer
label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
import collections
assert y_one_hot.shape == (39209, 43), 'y_one_hot is not the correct shape. It\'s {}, it should be (39209, 43)'.format(y_one_hot.shape)
assert next((False for y in y_one_hot if collections.Counter(y) != {0: 42, 1: 1}), True), 'y_one_hot not one-hot encoded.'
print('Tests passed.')
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
model = Sequential()
# TODO: Build a Multi-layer feedforward neural network with Keras here.
# 1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a Dense layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.activations import relu, softmax
def check_layers(layers, true_layers):
    """Assert that `true_layers` (layer instances) match the expected
    `layers` (layer classes), both in order and in count.

    Raises AssertionError with a descriptive message on any mismatch.
    """
    assert len(true_layers) != 0, 'No layers found'
    # Check the count before indexing; the original order raised a raw
    # IndexError instead of a readable assertion when the model had fewer
    # layers than expected.
    assert len(true_layers) == len(layers), '{} layers found, should be {} layers'.format(len(true_layers), len(layers))
    for layer_i in range(len(layers)):
        assert isinstance(true_layers[layer_i], layers[layer_i]), 'Layer {} is not a {} layer'.format(layer_i+1, layers[layer_i].__name__)
check_layers([Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[1].output_shape == (None, 128), 'Second layer output is wrong, it should be (128)'
assert model.layers[2].activation == relu, 'Third layer not a relu activation layer'
assert model.layers[3].output_shape == (None, 43), 'Fourth layer output is wrong, it should be (43)'
assert model.layers[4].activation == softmax, 'Fifth layer not a softmax activation layer'
print('Tests passed.')
# TODO: Compile and train the model here.
# Configures the learning process and metrics
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
# Train the model
# History is a record of training loss and metrics
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2)
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.optimizers import Adam
assert model.loss == 'categorical_crossentropy', 'Not using categorical_crossentropy loss function'
assert isinstance(model.optimizer, Adam), 'Not using adam optimizer'
assert len(history.history['acc']) == 10, 'You\'re using {} epochs when you need to use 10 epochs.'.format(len(history.history['acc']))
assert history.history['acc'][-1] > 0.92, 'The training accuracy was: %.3f. It shoud be greater than 0.92' % history.history['acc'][-1]
assert history.history['val_acc'][-1] > 0.85, 'The validation accuracy is: %.3f. It shoud be greater than 0.85' % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add a convolutional layer before the flatten layer.
from keras.layers import Convolution2D
nb_filters = 32
kernel_size = [3, 3]
model = Sequential()
#Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid', input_shape=(32, 32, 3)))
#Add a ReLU activation after the convolutional layer.
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten())
# 2nd Layer - Add a Dense layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
check_layers([Convolution2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[0].nb_filter == 32, 'Wrong number of filters, it should be 32'
assert model.layers[0].nb_col == model.layers[0].nb_row == 3, 'Kernel size is wrong, it should be a 3x3'
assert model.layers[0].border_mode == 'valid', 'Wrong padding, it should be valid'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add a pooling layer after the convolutional layer.
from keras.layers import MaxPooling2D
pool_size = [2, 2]
model = Sequential()
#Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid', input_shape=(32, 32, 3)))
#Add a 2x2 max pooling layer immediately following your convolutional layer.
model.add(MaxPooling2D(pool_size=pool_size))
#Add a ReLU activation after the convolutional layer.
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten())
# 2nd Layer - Add a Dense layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, MaxPooling2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[1].pool_size == (2, 2), 'Second layer must be a max pool layer with pool size of 2x2'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add dropout after the pooling layer.
from keras.layers import Dropout
model = Sequential()
#Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid', input_shape=(32, 32, 3)))
#Add a 2x2 max pooling layer immediately following your convolutional layer.
model.add(MaxPooling2D(pool_size=pool_size))
#Add a dropout layer after the pooling layer. Set the dropout rate to 50%.
model.add(Dropout(0.5))
#Add a ReLU activation after the convolutional layer.
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten())
# 2nd Layer - Add a Dense layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, MaxPooling2D, Dropout, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[2].p == 0.5, 'Third layer should be a Dropout of 50%'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Build a model
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=(32, 32, 3)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(43))
model.add(Activation('softmax'))
# TODO: Compile and train the model
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2)
# TODO: Load test data
with open('test.p', 'rb') as f:
samples = pickle.load(f)
X_test = samples['features']
y_test = samples['labels']
# TODO: Preprocess data & one-hot encode the labels
X_test, y_test = shuffle(X_test, y_test)
X_test = normalize(X_test)
label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_test)
# TODO: Evaluate model on test data
score = model.evaluate(X_test, y_one_hot, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: Load the Data
Step3: Preprocess the Data
Step4: Normalize the features
Step5: One-Hot Encode the labels
Step6: Keras Sequential Model
Step7: Training a Sequential Model
Step8: Convolutions
Step9: Pooling
Step10: Dropout
Step11: Optimization
Step12: Best Validation Accuracy
|
10,367
|
<ASSISTANT_TASK:>
Python Code:
import random
from numba import jit
import numpy as np
# Monte Carlo simulation function. This is defined as
# a function so the numba library can be used to speed
# up execution. Otherwise, this would run much slower.
@jit
def MCHist(n_hist, a, b, fmax):
    """Monte Carlo hit-or-miss estimate of the integral of exp(x^2) on [a, b].

    Parameters
    ----------
    n_hist : number of random histories (accepted as float, e.g. 1e8, or int).
    a, b : integration limits.
    fmax : upper bound on exp(x^2) over [a, b] (height of the sampling box).

    Returns
    -------
    Sum of per-history scores; divide by n_hist for the integral estimate.
    """
    score = (b - a)*fmax
    tot_score = 0
    # int() accepts float history counts; range(1, n_hist) previously ran
    # one history too few, slightly biasing the estimate.
    for n in range(int(n_hist)):
        x = random.uniform(a, b)
        f = random.uniform(0, fmax)
        f_x = np.exp(x**2)
        # Score the history if the sampled point falls under the curve
        if f < f_x:
            tot_score += score
    return tot_score
# Run the simulation
num_hist = 1e8
results = MCHist(num_hist, 0.0, 2.0, 54.6)
integral_val = round(results / num_hist, 6)
print("The calculated integral is {}".format(integral_val))
import matplotlib.pyplot as plt
%matplotlib inline
x = np.linspace(1,4,1000)
y = (np.exp(x)/x) + np.exp(1/x)
plt.plot(x,y)
plt.ylabel('F(x)')
plt.xlabel('x');
@jit
def MCHist2(n_hist, a, b, fmax):
    """Monte Carlo hit-or-miss estimate of the integral of
    exp(x)/x + exp(1/x) on [a, b].

    Parameters mirror MCHist: n_hist histories are sampled uniformly in the
    box [a, b] x [0, fmax]; divide the returned sum by n_hist for the
    integral estimate.
    """
    score = (b - a)*fmax
    tot_score = 0
    # int() accepts float history counts; range(1, n_hist) previously ran
    # one history too few, slightly biasing the estimate.
    for n in range(int(n_hist)):
        x = random.uniform(a, b)
        f = random.uniform(0, fmax)
        f_x = (np.exp(x)/x) + np.exp(1/x)
        # Score the history if the sampled point falls under the curve
        if f < f_x:
            tot_score += score
    return tot_score
# Run the simulation
num_hist2 = 1e8
results2 = MCHist2(num_hist2, 1.0, 4.0, 100)
integral_val2 = round(results2 / num_hist2, 6)
print("The calculated integral is {}".format(integral_val2))
num_hist3 = 1e8
results3 = MCHist2(num_hist2, 1.0, 4.0, 15)
integral_val3 = round(results3 / num_hist3, 6)
print("The calculated integral is {}".format(integral_val3))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Checking this answer with Wolfram Alpha, we get approximately the same result
Step2: From the above figure, we can see that the maximum is about 15. But what if we decided not to plot the function? Consider a situation where it's computationally expensive to plot the function over the entire limits of the integral. It's okay for us to choose an f_max that is too large, as long as we are sure that all possible values of F(x) will fall below it. The only downside of this approach is that more histories are required to converge to the correct answer.
Step3: Again, we check our work with Wolfram Alpha, and we get approximately the same result
|
10,368
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
from sympy import *
init_printing()
x, y = symbols('x y') #define x e y como variáveis simbólicas.
def f(x): return (x**3 - 3*x + 2)*exp(-x/4) - 1  # target function: f(x) = (x^3 - 3x + 2) e^(-x/4) - 1
f(x)
diff(f(x),x)
diff(f(x),x).subs(x,1)
#digite sua solução aqui!
x0 = -1/2
fl = diff(f(x),x).subs(x,-0.5) #f'(x_0)
print("coef. angular = ")
fl
def r(x) : return fl*(x-x0) + f(x0)  # tangent line at x0: y = f'(x0)*(x - x0) + f(x0)
print("equação da reta tangente y=")
r(x)
%matplotlib inline
p1 = plot(f(x),(x,-2,2),show=False,line_color='b')
p2 = plot(r(x),(x,-1.5,1),show=False,line_color='r')
p1.extend(p2)
p1.show()
#digite a resolução aqui.
diff(f(x),x,2)
#digite sua resposta aqui!
plot(f(x),(x,-2,2))
plot(f(x),(x,2,40))
plot(f(x),(x,40,60))
fl = diff(f(x),x) #calcula $f'(x)$
x1 = nsolve(fl,x,-1) #calcula ponto crítico próx. de x=-1
x2 = nsolve(fl,x,1) #calcula ponto crítico próx. de x=1
x3 = nsolve(fl,x,10) #calcula ponto crítico próx. de x=10
print("x1=",x1)
print("x2=",x2)
print("x3=",x3)
diff(f(x),x,2).subs(x,x1) #calcula f''(x1)
diff(f(x),x,2).subs(x,x2) #calcula f''(x2)
diff(f(x),x,2).subs(x,x3) #calcula f''(x3)
#digite sua solução aqui!
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Para fixar as ideias, vamos continuar trabalhando com a função
Step2: Derivadas
Step3: Para avaliar a derivada em um ponto, por exemplo, para calcular $f'(1)$, digitamos
Step4: Exercício
Step5: Retas Tangente
Step6: Agora, já podemos definir a reta tangente
Step7: Vejamos os gráficos de $f(x)$ e da reta tangente calculada.
Step8: $\blacktriangleleft$
Step9: Derivadas de ordem mais altas
Step10: Exercício
Step11: Máximos e Mínimos Locais
Step12: Observando os gráficos, vemos que os pontos críticos de $f(x)$ têm máximos locais próximos ao ponto $x=-1$ e $x=10$ e um mínimo local próximo do ponto $x=1$. Logo, podemos usar estes valores como parâmetro da função $\verb+nsolve+$
Step13: Ok. Acabamos de encontrar os pontos críticos de $f(x)$ (podem haver outros pontos críticos?). Por inspeção gráfica, vemos que $x_1 \approx -1,15$ e $x_3 \approx 12,15$ são os pontos de máximos locais de $f(x)$ e $x_2 = 1,0$ é o ponto de mínimo (global?).
Step14: Exercício
|
10,369
|
<ASSISTANT_TASK:>
Python Code:
from formosa import maxent_motifs, uniform_motifs, motif_ic
%matplotlib inline
from matplotlib import pyplot as plt
import random
random.seed(1)
maxent_samples = maxent_motifs(N=20, L=10, desired_ic=10, num_motifs=100000)
maxent_ics = map(motif_ic, maxent_samples)
_ = plt.hist(maxent_ics,bins=100)
plt.xlabel("Motif IC for MaxEnt Samples")
from formosa_utils import mean, sd
from math import sqrt
mu, sigma = mean(maxent_ics), sd(maxent_ics)
coverage = 1.96 * sigma/sqrt(10000)
print "sample mean: %1.3f bits" % mu
print "95%% confidence interval for mean: (%1.3f, %1.3f) (bits)" % (mu - coverage, mu + coverage)
tu_samples = uniform_motifs(N=20, L=10, desired_ic=10, epsilon=0.1, num_motifs=10000)
tu_ics = map(motif_ic, tu_samples)
_ = plt.hist(tu_ics,bins=100)
plt.xlabel("Motif IC for TU Samples")
import numpy as np
from math import exp
_ = plt.hist(tu_ics,bins=100)
plt.xlabel("Motif IC for TU Samples")
xs = np.linspace(9.9,10.1,1000)
fit = lambda x:300*exp(-15*(x-9.9))
plt.plot(xs,map(fit,xs),linewidth=5,color='r',label="exponential fit")
plt.legend()
from formosa import spoof_maxent_motifs, spoof_uniform_motifs
lacI_motif = ["CTATCACCGGAAGGGATTA",
"CTAACACCGTGCGTGTTGA",
"TTACCTCTGGCGGTGATAA",
"ATACCACTGGCGGTGATAC",
"TTATCTCTGGCGGTGTTGA",
"TAACCATCTGCGGTGATAA"]
lacI_spoofs = spoof_maxent_motifs(lacI_motif, num_motifs=10000)
lacI_ic = motif_ic(lacI_motif)
lacI_spoof_ics = map(motif_ic, lacI_spoofs)
_ = plt.hist(lacI_spoof_ics,bins=100,label="Replicate IC")
plt.plot([lacI_ic,lacI_ic],[0,500],linestyle='--',label="LacI motif IC")
plt.xlabel("Motif IC")
plt.legend()
mu, sigma = mean(lacI_spoof_ics), sd(lacI_spoof_ics)
coverage = 1.96 * sigma/sqrt(10000)
print "LacI motif IC: %1.3f" % lacI_ic
print "sample mean: %1.3f bits" % mu
print "95%% confidence interval for mean: (%1.3f, %1.3f) (bits)" % (mu - coverage, mu + coverage)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first two are for sampling; the third is for measuring the motif IC in order to validate our results. Let's also set up plotting for our notebook
Step2: We'll also set the random seed for reproducibility
Step3: MaxEnt Sampling
Step4: Now let's check our samples
Step5: The distribution of sampled motif ICs appears to be approximately normal, centered at 10 bits. We can check the mean with a quick confidence interval
Step6: So our mean IC appears to be exactly on target.
Step7: Notice that the TU sampler can sometimes be much slower than the MaxEnt sampler. This is because the TU sampler implements a rejection sampling algorithm with a MaxEnt sampler as the proposal distribution. It must exclude all motifs falling outside the desired IC interval. It must also reject some motifs which fall inside it in order to yield a statistically exact sample. Let's check the results
Step8: Notice that, in contrast to the MaxEnt samples, the distribution of the TU motif ICs appears to be approximately exponentially distributed over the permissible range of 9.9 to 10.1 bits. We can check this by overlaying a rough exponential fit to the data. Although the details of fitting parameters to truncated exponential data are beyond the scope of this notebook, we find that we can achieve a tolerable fit by eye
Step9: The exponential trend is expected behavior; a distribution that assigns equal probability mass to all motifs with IC between 9.9 and 10.1 bits will appear to be biased towards low IC values simply because there exist more motifs to be sampled on the low end than on the high end. Users should bear this phenomenon in mind when sampling from TU distributions.
Step10: In recent years, high-throughput methods have enabled the assembly of very large sequence motifs. For ease of illustration, though, let's consider the collection of LacI binding sites treated by Berg and von Hippel in 1987 [0]
Step11: To generate synthetic MaxEnt controls we can call
Step12: (TU motifs can be sampled in a similar way.) To verify the output, let's compare the IC of the original LacI motif to the IC distribution of the controls
|
10,370
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# ES-DOC output document for the CMIP6 "seaice" realm of the
# EC-Earth-Consortium EC-Earth3-HR model.  Every DOC.set_id(...) /
# DOC.set_value(...) cell below records one model property into this
# single document object.
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-hr', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
10,371
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import graphlab
products = graphlab.SFrame('amazon_baby_subset.gl/')
import json
with open('important_words.json', 'r') as f:
important_words = json.load(f)
important_words = [str(s) for s in important_words]
# Remove punctuation
def remove_punctuation(text):
    """Return *text* with every ASCII punctuation character removed.

    The original ``text.translate(None, string.punctuation)`` form is
    Python-2-only (``str.translate`` lost the *deletechars* argument in
    Python 3); a character filter behaves identically on both versions.
    """
    import string
    return ''.join(ch for ch in text if ch not in string.punctuation)
products['review_clean'] = products['review'].apply(remove_punctuation)
# Split out the words into individual columns
for word in important_words:
products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
products
train_data, validation_data = products.random_split(.9, seed=1)
print 'Training set : %d data points' % len(train_data)
print 'Validation set: %d data points' % len(validation_data)
import numpy as np
def get_numpy_data(data_sframe, features, label):
    """Convert a frame into a ``(feature_matrix, label_array)`` numpy pair.

    Adds a constant ``'intercept'`` column of ones to *data_sframe* in
    place and prepends it to the requested feature list, so the first
    column of the returned matrix is the intercept term.
    """
    data_sframe['intercept'] = 1
    ordered_columns = ['intercept'] + features
    feature_matrix = data_sframe[ordered_columns].to_numpy()
    label_array = data_sframe[label].to_numpy()
    return (feature_matrix, label_array)
feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment')
feature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment')
def predict_probability(feature_matrix, coefficients):
    """Produce the probabilistic estimate P(y_i = +1 | x_i, w) for each row.

    (This description previously sat in a stray no-op string literal above
    the function; it belongs inside as the docstring.)

    Parameters
    ----------
    feature_matrix : (N, D) array, one row of feature values per data point.
    coefficients : (D,) array of model weights w.

    Returns
    -------
    (N,) array of probabilities; each estimate ranges between 0 and 1.
    """
    # Linear score x_i . w for every data point at once.
    score = np.dot(feature_matrix, coefficients)
    # Sigmoid link function maps scores onto (0, 1) probabilities.
    predictions = 1. / (1. + np.exp(-score))
    return predictions
def feature_derivative(errors, feature):
    """Partial derivative of the log likelihood w.r.t. one coefficient.

    Parameters
    ----------
    errors : (B,) array of (indicator - prediction) values for the batch.
    feature : (B,) array holding one feature's value for each batch point.

    Returns
    -------
    sum_i errors[i] * feature[i].
    """
    # np.dot performs the multiply-accumulate in C; the original
    # builtin sum(feature * errors) iterated numpy scalars in Python.
    derivative = np.dot(errors, feature)
    return derivative
def compute_avg_log_likelihood(feature_matrix, sentiment, coefficients):
    """Average log likelihood of the data under a logistic model.

    Computes (1/N) * sum_i [ (1[y_i=+1] - 1) * s_i - log(1 + exp(-s_i)) ]
    where s_i = x_i . w, guarding against overflow of exp(-s_i).
    """
    scores = np.dot(feature_matrix, coefficients)
    is_positive = (sentiment == +1)
    log_terms = np.log(1. + np.exp(-scores))
    # Where exp(-s) overflowed to inf, log(1 + exp(-s)) ~= -s exactly.
    overflow = np.isinf(log_terms)
    log_terms[overflow] = -scores[overflow]
    return np.sum((is_positive - 1) * scores - log_terms) / len(feature_matrix)
j = 1 # Feature number
i = 10 # Data point number
coefficients = np.zeros(194) # A point w at which we are computing the gradient.
predictions = predict_probability(feature_matrix_train[i:i+1,:], coefficients)
indicator = (sentiment_train[i:i+1]==+1)
errors = indicator - predictions
gradient_single_data_point = feature_derivative(errors, feature_matrix_train[i:i+1,j])
print "Gradient single data point: %s" % gradient_single_data_point
print " --> Should print 0.0"
j = 1 # Feature number
i = 10 # Data point start
B = 10 # Mini-batch size
coefficients = np.zeros(194) # A point w at which we are computing the gradient.
predictions = predict_probability(feature_matrix_train[i:i+B,:], coefficients)
indicator = (sentiment_train[i:i+B]==+1)
errors = indicator - predictions
gradient_mini_batch = feature_derivative(errors, feature_matrix_train[i:i+B,j])
print "Gradient mini-batch data points: %s" % gradient_mini_batch
print " --> Should print 1.0"
from math import sqrt
def logistic_regression_SG(feature_matrix, sentiment, initial_coefficients, step_size, batch_size, max_iter):
    """Fit logistic-regression coefficients by mini-batch stochastic gradient ascent.

    NOTE: Python-2 code (``xrange``, ``print`` statements).

    Parameters
    ----------
    feature_matrix : (N, D) array of features, one row per data point.
    sentiment : (N,) array of labels in {+1, -1}.
    initial_coefficients : (D,) starting point w.
    step_size : gradient-ascent learning rate.
    batch_size : number of data points per mini-batch (N gives full batch,
        1 gives pure stochastic gradient).
    max_iter : number of mini-batch updates to perform.

    Returns
    -------
    (coefficients, log_likelihood_all) where log_likelihood_all holds the
    average log likelihood of each *batch* (not the full data) per iteration.
    """
    log_likelihood_all = []
    # make sure it's a numpy array
    coefficients = np.array(initial_coefficients)
    # set seed=1 to produce consistent results
    np.random.seed(seed=1)
    # Shuffle the data before starting so batches are random slices
    permutation = np.random.permutation(len(feature_matrix))
    feature_matrix = feature_matrix[permutation,:]
    sentiment = sentiment[permutation]
    i = 0 # index of current batch (start of the slice [i:i+batch_size])
    # Do a linear scan over data
    for itr in xrange(max_iter):
        # Predict P(y_i = +1|x_i,w) using your predict_probability() function
        # Make sure to slice the i-th row of feature_matrix with [i:i+batch_size,:]
        ### YOUR CODE HERE
        predictions = predict_probability(feature_matrix[i:i+batch_size,:], coefficients)
        # Compute indicator value for (y_i = +1)
        # Make sure to slice the i-th entry with [i:i+batch_size]
        ### YOUR CODE HERE
        indicator = (sentiment[i:i+batch_size]==+1)
        # Compute the errors as indicator - predictions
        errors = indicator - predictions
        for j in xrange(len(coefficients)): # loop over each coefficient
            # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j]
            # Compute the derivative for coefficients[j] and save it to derivative.
            # Make sure to slice the i-th row of feature_matrix with [i:i+batch_size,j]
            ### YOUR CODE HERE
            derivative = feature_derivative(errors, feature_matrix[i:i+batch_size,j])
            # compute the product of the step size, the derivative, and the **normalization constant** (1./batch_size)
            # (the 1./batch_size keeps step sizes comparable across batch sizes)
            ### YOUR CODE HERE
            coefficients[j] += (step_size * derivative * (1./batch_size))
        # Checking whether log likelihood is increasing
        # Print the log likelihood over the *current batch*
        lp = compute_avg_log_likelihood(feature_matrix[i:i+batch_size,:], sentiment[i:i+batch_size],
                                        coefficients)
        log_likelihood_all.append(lp)
        # Report progress at a logarithmically thinning cadence plus the last iteration
        if itr <= 15 or (itr <= 1000 and itr % 100 == 0) or (itr <= 10000 and itr % 1000 == 0) \
         or itr % 10000 == 0 or itr == max_iter-1:
            data_size = len(feature_matrix)
            print 'Iteration %*d: Average log likelihood (of data points in batch [%0*d:%0*d]) = %.8f' % \
                (int(np.ceil(np.log10(max_iter))), itr, \
                 int(np.ceil(np.log10(data_size))), i, \
                 int(np.ceil(np.log10(data_size))), i+batch_size, lp)
        # if we made a complete pass over data, shuffle and restart
        i += batch_size
        if i+batch_size > len(feature_matrix):
            permutation = np.random.permutation(len(feature_matrix))
            feature_matrix = feature_matrix[permutation,:]
            sentiment = sentiment[permutation]
            i = 0
    # We return the list of log likelihoods for plotting purposes.
    return coefficients, log_likelihood_all
sample_feature_matrix = np.array([[1.,2.,-1.], [1.,0.,1.]])
sample_sentiment = np.array([+1, -1])
coefficients, log_likelihood = logistic_regression_SG(sample_feature_matrix, sample_sentiment, np.zeros(3),
step_size=1., batch_size=2, max_iter=2)
print '-------------------------------------------------------------------------------------'
print 'Coefficients learned :', coefficients
print 'Average log likelihood per-iteration :', log_likelihood
if np.allclose(coefficients, np.array([-0.09755757, 0.68242552, -0.7799831]), atol=1e-3)\
and np.allclose(log_likelihood, np.array([-0.33774513108142956, -0.2345530939410341])):
# pass if elements match within 1e-3
print '-------------------------------------------------------------------------------------'
print 'Test passed!'
else:
print '-------------------------------------------------------------------------------------'
print 'Test failed'
coefficients, log_likelihood = logistic_regression_SG(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=5e-1, batch_size=1, max_iter=10)
# YOUR CODE HERE
coefficients_batch, log_likelihood_batch = logistic_regression_SG(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=5e-1, batch_size=len(feature_matrix_train), max_iter=200)
2 * int(50000/100)
step_size = 1e-1
batch_size = 100
num_passes = 10
num_iterations = num_passes * int(len(feature_matrix_train)/batch_size)
coefficients_sgd, log_likelihood_sgd = logistic_regression_SG(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=1e-1, batch_size=100, max_iter=num_iterations)
import matplotlib.pyplot as plt
%matplotlib inline
def make_plot(log_likelihood_all, len_data, batch_size, smoothing_window=1, label=''):
    """Draw a (optionally moving-average smoothed) average-log-likelihood
    trace against the number of passes over the data.

    Mutates global pyplot state and plots onto the current figure; returns
    nothing (same contract as before).
    """
    plt.rcParams.update({'figure.figsize': (9,5)})
    # Moving average over `smoothing_window` consecutive iterations.
    trace = np.array(log_likelihood_all)
    kernel = np.ones((smoothing_window,)) / smoothing_window
    smoothed = np.convolve(trace, kernel, mode='valid')
    # Convert iteration index -> number of passes over the full data set.
    iterations = np.array(range(smoothing_window - 1, len(log_likelihood_all)))
    passes = iterations * float(batch_size) / len_data
    plt.plot(passes, smoothed, linewidth=4.0, label=label)
    plt.rcParams.update({'font.size': 16})
    plt.tight_layout()
    plt.xlabel('# of passes over data')
    plt.ylabel('Average log likelihood per data point')
    plt.legend(loc='lower right', prop={'size':14})
# Raw (unsmoothed) SGD trace, then the same trace smoothed over 30 iterations.
make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100,
          label='stochastic gradient, step_size=1e-1')
make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100,
          smoothing_window=30, label='stochastic gradient, step_size=1e-1')
# Longer run: 200 passes of mini-batch SGD.
step_size = 1e-1
batch_size = 100
num_passes = 200
num_iterations = num_passes * int(len(feature_matrix_train)/batch_size)
## YOUR CODE HERE
coefficients_sgd, log_likelihood_sgd = logistic_regression_SG(feature_matrix_train, sentiment_train,
                                                              initial_coefficients=np.zeros(194),
                                                              step_size=step_size, batch_size=batch_size, max_iter=num_iterations)
# Overlay smoothed SGD against full-batch gradient ascent.
make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100,
          smoothing_window=30, label='stochastic, step_size=1e-1')
make_plot(log_likelihood_batch, len_data=len(feature_matrix_train), batch_size=len(feature_matrix_train),
          smoothing_window=1, label='batch, step_size=5e-1')
# Step-size sweep: 7 log-spaced step sizes from 1e-4 to 1e2, 10 passes each.
batch_size = 100
num_passes = 10
num_iterations = num_passes * int(len(feature_matrix_train)/batch_size)
coefficients_sgd = {}
log_likelihood_sgd = {}
for step_size in np.logspace(-4, 2, num=7):
    coefficients_sgd[step_size], log_likelihood_sgd[step_size] = logistic_regression_SG(feature_matrix_train, sentiment_train,
                                                                                        initial_coefficients=np.zeros(194),
                                                                                        step_size=step_size, batch_size=batch_size, max_iter=num_iterations)
# Plot all step sizes, then all but the largest (1e2), which diverges.
for step_size in np.logspace(-4, 2, num=7):
    make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100,
              smoothing_window=30, label='step_size=%.1e'%step_size)
for step_size in np.logspace(-4, 2, num=7)[0:6]:
    make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100,
              smoothing_window=30, label='step_size=%.1e'%step_size)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and process review dataset
Step2: Just like we did previously, we will work with a hand-curated list of important words extracted from the review data. We will also perform 2 simple data transformations
Step3: The SFrame products now contains one column for each of the 193 important_words.
Step4: Split data into training and validation sets
Step5: Convert SFrame to NumPy array
Step6: Note that we convert both the training and validation sets into NumPy arrays.
Step7: Are you running this notebook on an Amazon EC2 t2.micro instance? (If you are using your own machine, please skip this section)
Step8: Derivative of log likelihood with respect to a single coefficient
Step9: Note. We are not using regularization in this assignment, but, as discussed in the optional video, stochastic gradient can also be used for regularized logistic regression.
Step10: Quiz Question
Step11: Quiz Question
Step12: Quiz Question
Step13: Note. In practice, the final set of coefficients is rarely used; it is better to use the average of the last K sets of coefficients instead, where K should be adjusted depending on how fast the log likelihood oscillates around the optimum.
Step14: Compare convergence behavior of stochastic gradient ascent
Step15: Quiz Question. When you set batch_size = 1, as each iteration passes, how does the average log likelihood in the batch change?
Step16: Quiz Question. When you set batch_size = len(train_data), as each iteration passes, how does the average log likelihood in the batch change?
Step17: Log likelihood plots for stochastic gradient ascent
Step18: We provide you with a utility function to plot the average log likelihood as a function of the number of passes.
Step19: Smoothing the stochastic gradient ascent curve
Step20: Checkpoint
Step21: We compare the convergence of stochastic gradient ascent and batch gradient ascent in the following cell. Note that we apply smoothing with smoothing_window=30.
Step22: Quiz Question
Step23: Plotting the log likelihood as a function of passes for each step size
Step24: Now, let us remove the step size step_size = 1e2 and plot the rest of the curves.
|
10,372
|
<ASSISTANT_TASK:>
Python Code:
!pip install --pre deepchem
import deepchem
deepchem.__version__
import deepchem as dc
import tensorflow as tf
from deepchem.models.optimizers import ExponentialDecay
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Dense, Reshape
import matplotlib.pyplot as plot
import matplotlib.gridspec as gridspec
%matplotlib inline
# Load MNIST, keep only the training images, scale to [0, 1], add a channel
# axis, and wrap in a DeepChem dataset for the GAN's batch iterator.
mnist = tf.keras.datasets.mnist.load_data(path='mnist.npz')
images = mnist[0][0].reshape((-1, 28, 28, 1))/255
dataset = dc.data.NumpyDataset(images)
def plot_digits(im):
    """Render the first 16 images of `im` (shape (N, 28, 28, 1)) as a tight
    4x4 grayscale grid on a new 3x3-inch figure."""
    plot.figure(figsize=(3, 3))
    cells = gridspec.GridSpec(4, 4, wspace=0.05, hspace=0.05)
    for index, cell in enumerate(cells):
        axis = plot.subplot(cell)
        axis.set_xticks([])
        axis.set_yticks([])
        axis.imshow(im[index, :, :, 0], cmap='gray')
# Preview a 4x4 grid of raw MNIST digits before training the GAN.
plot_digits(images)
class DigitGAN(dc.models.WGAN):
    """Wasserstein GAN mapping 10-dim noise vectors to 28x28x1 digit images."""

    def get_noise_input_shape(self):
        # Latent vector fed to the generator.
        return (10,)

    def get_data_input_shapes(self):
        # One data stream: grayscale MNIST images.
        return [(28, 28, 1)]

    def create_generator(self):
        # Dense -> 7x7x8 feature map, then two stride-2 transposed convs
        # upsample 7 -> 14 -> 28; sigmoid keeps pixel values in [0, 1].
        return tf.keras.Sequential([
            Dense(7*7*8, activation=tf.nn.relu),
            Reshape((7, 7, 8)),
            Conv2DTranspose(filters=16, kernel_size=5, strides=2, activation=tf.nn.relu, padding='same'),
            Conv2DTranspose(filters=1, kernel_size=5, strides=2, activation=tf.sigmoid, padding='same')
        ])

    def create_discriminator(self):
        # Two stride-2 convs downsample 28 -> 14 -> 7; final Dense produces
        # the critic score used by the WGAN loss.
        return tf.keras.Sequential([
            Conv2D(filters=32, kernel_size=5, strides=2, activation=tf.nn.leaky_relu, padding='same'),
            Conv2D(filters=64, kernel_size=5, strides=2, activation=tf.nn.leaky_relu, padding='same'),
            Dense(1, activation=tf.math.softplus)
        ])

# Learning rate decays by 0.9 every 5000 steps.
gan = DigitGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000))

def iterbatches(epochs):
    # Yield feed dicts for `epochs` full passes over the MNIST dataset.
    for i in range(epochs):
        for batch in dataset.iterbatches(batch_size=gan.batch_size):
            yield {gan.data_inputs[0]: batch[0]}

# One generator step per 5 discriminator steps; checkpoint every 5000 batches.
gan.fit_gan(iterbatches(100), generator_steps=0.2, checkpoint_interval=5000)
plot_digits(gan.predict_gan_generator(batch_size=16))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To begin, let's import all the libraries we'll need and load the dataset (which comes bundled with Tensorflow).
Step2: Let's view some of the images to get an idea of what they look like.
Step3: Now we can create our GAN. Like in the last tutorial, it consists of two parts
Step4: Now to train it. As in the last tutorial, we write a generator to produce data. This time the data is coming from a dataset, which we loop over 100 times.
Step5: Let's generate some data and see how the results look.
|
10,373
|
<ASSISTANT_TASK:>
Python Code:
print('Importing libraries...')
import numpy as np
import pandas as pd
from sklearn import cross_validation as cv
from sklearn.cross_validation import KFold
from sklearn.ensemble import RandomForestClassifier
print('Fetching the training and test datasets...')
# Age is parsed as float so missing values become NaN.
train = pd.read_csv("data/train.csv", dtype={"Age": np.float64}, )
test = pd.read_csv("data/test.csv", dtype={"Age": np.float64}, )
print('Cleaning the dataset...')
def harmonize_data(titanic):
    """Clean a Titanic DataFrame in place and return it: impute missing
    Age/Fare with the column mean and missing Embarked with 'S', then encode
    Sex (male=1, female=0) and Embarked (S=0, C=1, Q=2) numerically."""
    # Impute missing numeric values with the column mean.
    for column in ("Age", "Fare"):
        titanic[column] = titanic[column].fillna(titanic[column].mean())
    titanic["Embarked"] = titanic["Embarked"].fillna("S")
    # Encode categorical columns as integers for the classifier.
    titanic["Sex"] = titanic["Sex"].replace({"male": 1, "female": 0})
    titanic["Embarked"] = titanic["Embarked"].replace({"S": 0, "C": 1, "Q": 2})
    return titanic
print('Defining submission file...')
def create_submission(rfc, train, test, predictors, filename):
    """Fit `rfc` on train[predictors] vs train['Survived'], predict on
    test[predictors], and write a Kaggle-style (PassengerId, Survived) CSV
    to `filename`."""
    rfc.fit(train[predictors], train["Survived"])
    survived = rfc.predict(test[predictors])
    frame = pd.DataFrame({"PassengerId": test["PassengerId"],
                          "Survived": survived})
    frame.to_csv(filename, index=False)
print('Defining the clean dataset...')
train_data = harmonize_data(train)
test_data = harmonize_data(test)
print('Performing feature enginnering...')
# PSA: class x sex x age interaction term; SP: family size aboard.
train_data["PSA"] = train_data["Pclass"]*train_data["Sex"]*train_data["Age"]
train_data["SP"] = train_data["SibSp"]+train_data["Parch"]
test_data["PSA"] = test_data["Pclass"]*test_data["Sex"]*test_data["Age"]
test_data["SP"] = test_data["SibSp"]+test_data["Parch"]
print('Defining predictors...')
predictors = ["Pclass", "Sex", "Age", "PSA", "Fare", "Embarked", "SP"]
print('Finding best n_estimators for RandomForestClassifier...')
max_score = 0
best_n = 0
# For each value of n_estimators:
# 1- Split/CV your data (KFold)
# 2- Fit RF
# 3- Calculate score for n_estimators by averaging up the individual scores of the 10 folds
# 4- Once best_n found, do the same for max_depth
# NOTE(review): the inner loop rebinds `train`/`test` to fold index arrays,
# shadowing the raw DataFrames loaded above. Harmless later (only
# train_data/test_data are used afterwards), but worth renaming.
for n in range(1,100):
    rfc_scr = 0.
    rfc = RandomForestClassifier(n_estimators=n)
    for train, test in KFold(len(train_data), n_folds=10, shuffle=True):
        # .T[idx].T selects rows by positional index (old-sklearn KFold API).
        rfc.fit(train_data[predictors].T[train].T, train_data["Survived"].T[train].T)
        rfc_scr += rfc.score(train_data[predictors].T[test].T, train_data["Survived"].T[test].T)/10
    if rfc_scr > max_score:
        max_score = rfc_scr
        best_n = n
print(best_n, max_score)
print('Finding best max_depth for RandomForestClassifier...')
max_score = 0
best_m = 0
# NOTE(review): this sweep leaves n_estimators at its default instead of
# reusing best_n found above — presumably unintended; confirm.
for m in range(1,100):
    rfc_scr = 0.
    rfc = RandomForestClassifier(max_depth=m)
    for train, test in KFold(len(train_data), n_folds=10, shuffle=True):
        rfc.fit(train_data[predictors].T[train].T, train_data["Survived"].T[train].T)
        rfc_scr += rfc.score(train_data[predictors].T[test].T, train_data["Survived"].T[test].T)/10
    if rfc_scr > max_score:
        max_score = rfc_scr
        best_m = m
print(best_n, best_m, max_score)
# Finally, we just use the optimal parameters found to create the final RF and fit our data
print('Applying method...')
rfc = RandomForestClassifier(n_estimators=best_n, max_depth=best_m)
print('Creating submission...')
create_submission(rfc, train_data, test_data, predictors, "rfcsurvivors.csv")
print('Submitted.')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cleaning the data & preparing a submission file
Step2: Creating new features
Step3: Finding best values for n_estimators and max_depth
Step4: Final random forest and submission
|
10,374
|
<ASSISTANT_TASK:>
Python Code:
# Make the repository root importable, then display AIMA deep-learning
# sources/pseudocode in the notebook.
import os, sys
sys.path = [os.path.abspath("../../")] + sys.path
from deep_learning4e import *
from notebook4e import *
psource(gradient_descent)
pseudocode(adam_optimizer)
psource(adam_optimizer)
psource(BackPropagation)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There several key elements need to specify when using a gradient_descent optimizer
Step2: There are several attributes to specify when using Adam optimizer that is different from gradient descent
|
10,375
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import CombineCCFs
import numpy as np
from astropy import units as u, constants
from HelperFunctions import Gauss, integral
import os
import lmfit
import emcee
import triangle
from scipy.interpolate import InterpolatedUnivariateSpline as spline
sns.set_context('paper', font_scale=2.0)
# Paths to the cross-correlation HDF5 store and the paper figure directory.
home = os.environ['HOME']
hdf_file = '{}/School/Research/McDonaldData/PlanetData/PsiDraA/Cross_correlations/CCF.hdf5'.format(home)
output_dir = '{}/School/Research/McDonaldData/PlanetData/Paper/Figures/'.format(home)
# Template parameters (Teff, vsini, log g, [Fe/H]) and velocity grid step (km/s).
T = 4400
vsini = 5
logg = 4.5
metal = 0.0
dV = 0.1
c = constants.c.cgs.to(u.m/u.s).value
xgrid = np.arange(-400, 400+dV/2., dV)
ccfs, original_files = CombineCCFs.get_ccfs(T=T, vsini=vsini, logg=logg, metal=metal,
                                            hdf_file=hdf_file, xgrid=xgrid)
# Plot all the CCFs
cmap = sns.cubehelix_palette(reverse=False, as_cmap=True, gamma=1, rot=0.7, start=2)
fig, ax = plt.subplots(1, 1)
out = ax.imshow(ccfs, cmap=cmap, aspect='auto', origin='lower')#, vmin=vmin, vmax=vmax)
# x-axis is in grid-pixel units; +400 shifts velocity 0 to the grid origin.
min_v = -75.
max_v = 75.
dv_ticks = 50.0/dV
ax.set_xlim(((min_v+400)/dV, (max_v+400)/dV))
ticks = np.arange((min_v+400)/dV, (max_v+400)/dV+1, dv_ticks)
ax.set_xticks((ticks))
#ax.set_xticklabels((-150, -100, -50, 0, 50, 100, 150))
ax.set_xticklabels((-75, -25, 25, 75))
ax.set_xlabel('Velocity in primary star rest frame (km/s)')
ax.set_yticklabels(())
ax.set_ylabel('Observation Date')
# Colorbar
cb = plt.colorbar(out)
cb.set_label('CCF Power')
# Save
plt.savefig('{}Original_CCFs.pdf'.format(output_dir))
# Residual CCFs: subtract the epoch-averaged CCF from each observation.
avg_ccf = np.mean(ccfs, axis=0)
normed_ccfs = ccfs - avg_ccf
# Get the time each observation was made (in julian date)
dates = np.array([CombineCCFs.fits.getheader(fname)['HJD'] for fname in original_files])
# Set up the scaling manually
low, high = np.min(normed_ccfs), np.max(normed_ccfs)
rng = max(abs(low), abs(high))
vmin = np.sign(low) * rng
vmax = np.sign(high) * rng
# Make the actual plot
#cmap = sns.cubehelix_palette(reverse=False, as_cmap=True, gamma=1, rot=0.7, start=2) #defined above now...
fig, ax = plt.subplots(1, 1)
out = ax.imshow(normed_ccfs, cmap=cmap, aspect='auto', origin='lower')#, vmin=vmin, vmax=vmax)
ax.set_xlim(((min_v+400)/dV, (max_v+400)/dV))
ticks = np.arange((min_v+400)/dV, (max_v+400)/dV+1, dv_ticks)
ax.set_xticks((ticks))
#ax.set_xticklabels((-150, -100, -50, 0, 50, 100, 150))
ax.set_xticklabels((-75, -25, 25, 75))
ax.set_xlabel('Velocity (km/s)')
ax.set_yticklabels(())
ax.set_ylabel('Observation Date')
# Colorbar
cb = plt.colorbar(out)
cb.set_label('CCF Power')
fig.subplots_adjust(bottom=0.18, left=0.10, top=0.95, right=0.90)
plt.savefig('{}Resid_CCFs.pdf'.format(output_dir))
# Make the same plot, but with the y-axis linearized so that I can give dates.
X,Y = np.meshgrid(xgrid, dates-2450000)
fig, ax = plt.subplots(1, 1, figsize=(6,4))
out = ax.pcolormesh(X,Y,normed_ccfs, cmap=cmap, rasterized=True)
ax.set_xlabel('Velocity (km/s)')
ax.set_ylabel('JD - 2450000')
ax.set_xlim((-75, 75))
ax.set_ylim((Y.min(), Y.max()))
# Colorbar
cb = plt.colorbar(out)
cb.set_label('CCF Power')
fig.subplots_adjust(bottom=0.18, left=0.15, top=0.95, right=0.90)
plt.savefig('{}Resid_CCFs.pdf'.format(output_dir))
print(output_dir)
# IPython help lookup (not valid plain Python outside a notebook).
ax.pcolormesh?
def fwhm(x, y, search_range=(-500, 500)):
    """Locate the peak of y(x) within `search_range` and its half-maximum
    crossings.

    Parameters
    ----------
    x, y : numpy arrays of equal length.
    search_range : (low, high) open interval of x values to consider.

    Returns
    -------
    (left, center, right) : the linearly interpolated x positions of the
    half-maximum crossings on either side of the peak, and the x position
    of the maximum itself.

    Raises
    ------
    ValueError
        If `search_range` selects no samples or a half-maximum crossing is
        not found on either side of the peak.  (The previous version hit a
        NameError or extrapolated from the wrong segment in these cases,
        and interpolated one grid point beyond the actual crossing.)
    """
    good = (x > search_range[0]) & (x < search_range[1])
    x = x[good].copy()
    y = y[good].copy()
    if x.size == 0:
        raise ValueError("search_range selects no samples")
    idx = np.argmax(y)
    half_max = y[idx] / 2.0

    def _crossing(inner, outer):
        # Linear interpolation between the last point still above half_max
        # (inner) and the first point below it (outer).
        slope = (y[outer] - y[inner]) / (x[outer] - x[inner])
        return x[inner] + (half_max - y[inner]) / slope

    left = right = None
    # Walk left from the peak until y drops below half maximum.
    for di in range(1, idx + 1):
        if y[idx - di] < half_max:
            left = _crossing(idx - di + 1, idx - di)
            break
    # Walk right from the peak until y drops below half maximum.
    for di in range(1, x.size - idx):
        if y[idx + di] < half_max:
            right = _crossing(idx + di - 1, idx + di)
            break
    if left is None or right is None:
        raise ValueError("half-maximum crossing not found within search_range")
    return left, x[idx], right
# Make plot of a normal residual CCF
sns.set_style('white')
sns.set_style('ticks')
# Pick one representative epoch and mark its peak (green) and half-max
# crossings (red) as found by fwhm().
i = 50
corr = normed_ccfs[i]
fig, ax = plt.subplots()
ax.plot(xgrid, corr, 'k-', lw=3)
l, m, h = fwhm(xgrid, corr, search_range=(-50, 10))
ylim = ax.get_ylim()
ax.plot([m, m], ylim, 'g--', alpha=0.7)
ax.plot([l, l], ylim, 'r--', alpha=0.7)
ax.plot([h, h], ylim, 'r--', alpha=0.7)
ax.set_xlim((-60, 20))
ax.set_ylim(ylim)
ax.set_xlabel('Velocity (km/s)')
ax.set_ylabel('CCF Power')
fig.subplots_adjust(bottom=0.18, left=0.20, top=0.95, right=0.95)
plt.savefig('{}Typical_CCF.pdf'.format(output_dir))
# Measure the radial velocities of the companion as the peak and FWHM of the residual CCF
date = []
rv1 = []
rv1_err = []
rv2 = []
rv2_err = []
import time
for i in range(10, len(original_files)):
#for i in range(20, 21):
    header = CombineCCFs.fits.getheader(original_files[i])
    jd = header['HJD']
    prim_rv = CombineCCFs.get_prim_rv(original_files[i], data_shift=0.0)
    measurements = CombineCCFs.get_measured_rv(original_files[i])
    rv1.append(measurements[0])
    rv1_err.append(measurements[1])
    # Best-effort: bare except deliberately records NaN for epochs where the
    # half-maximum search fails, and skips the diagnostic plot for them.
    try:
        l, m, h = fwhm(xgrid.copy(), normed_ccfs[i], search_range=(-50, 10))
        print('i = {}, HJD = {}\n\t{:.1f} +{:.1f}/-{:.1f}\n\t{}'.format(i, jd, m, h-m, m-l, original_files[i]))
        date.append(jd)
        rv2.append((h+l)/2.)
        rv2_err.append((h-l)/2.355)
    except:
        date.append(jd)
        rv2.append(np.nan)
        rv2_err.append(np.nan)
        continue
    plt.plot(xgrid, normed_ccfs[i])
    plt.xlim((-100, 50))
    ylim = plt.ylim()
    plt.plot([(h+l)/2., (h+l)/2.], ylim, 'g--')
    plt.savefig('Figures/CCF_{}.pdf'.format(original_files[i][:-5]))
    plt.cla()
rv1 = np.array(rv1)
rv1_err = np.array(rv1_err)
rv2 = np.array(rv2)
rv2_err = np.array(rv2_err)
# I don't trust the very early measurements
rv2[:6] = np.nan*np.ones(6)
rv2_err[:6] = np.nan*np.ones(6)
# Save the RVs
np.savetxt('rv_data.txt', (date, rv1, rv1_err, rv2, rv2_err))
import pandas as pd
# Primary-star RVs from the external data file; columns 2/3 are m/s.
rv1_data = pd.read_fwf('../Planet-Finder/data/psi1draa_140p_28_37_ASW.dat', header=None)
t1 = rv1_data[0].values
rv1 = rv1_data[2].values / 1000. # Convert from m/s to km/s
rv1_err = rv1_data[3].values / 1000.
new_rv2 = np.empty_like(t1)
new_rv2_err = np.empty_like(t1)
# Match each primary-RV epoch to the nearest companion-RV epoch (< ~86 s apart).
# NOTE(review): `date` is still a plain Python list here, so `date - t1_i`
# presumably relies on it being array-like — confirm (np.asarray(date) would
# be safer).  Python 2 print statements below.
for i, t1_i in enumerate(t1):
    #idx = np.searchsorted(t1, t2)
    idx = np.argmin(np.abs(date-t1_i))
    if abs(t1_i - date[idx]) < 0.001:
        print i, t1_i, date[idx], t1_i-date[idx]
        new_rv2[i] = rv2[idx]
        new_rv2_err[i] = rv2_err[idx]
    else:
        print i, t1_i, date[idx], np.nan
        new_rv2[i] = np.nan
        new_rv2_err[i] = np.nan
np.savetxt('../Planet-Finder/data/rv_data.txt', (t1, rv1, rv1_err, new_rv2, new_rv2_err))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get and shift the Cross-correlation functions to the primary star rest frame
Step2: Measure the companion RVs.
Step3: Fix the dates to line up with the RV output file
|
10,376
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%pylab inline --no-import-all
pylab.rcParams['figure.figsize'] = (18, 10)
from ntfdl import Multi
from matplotlib.finance import candlestick_ohlc
from datetime import datetime, timedelta
# Instantiate multi with instrument FOE from Oslo exchange (OSS)
foe = Multi('FOE', exchange='OSE')
# Make a timerange from to
end = datetime.now() #Today
start = end - timedelta(days=20) #20 days ago
trades = foe.get_trades(start, end)
# Plotting without gaps
# Plot against an integer index (not timestamps) so non-trading days leave no gaps.
fig, ax = plt.subplots()
ax.plot(range(trades.price.size), trades.price)
ax.set_xticklabels(trades.index.date.tolist())
# Plot max and min line
ax.axhline(y=trades.price.max(), linestyle='--', color='r')
ax.axhline(y=trades.price.min(), linestyle='--', color='g')
# Adding axis right hand side
ax.tick_params(labeltop=False, labelright=True)
# Annotate last quote
xmin, xmax = ax.get_xlim()
plt.annotate(trades.iloc[-1].price, xy=(1, trades.iloc[-1].price), xytext=(0, 0), \
             xycoords=('axes fraction', 'data'), textcoords='offset points', backgroundcolor='k', color='w')
#ax.plot(xmax, trades.iloc[-1].price, '<k', markersize=18, markeredgecolor='k')
fig.autofmt_xdate()
plt.grid()
# Resample raw trades into 1-minute OHLCV bars.
ohlcv = foe.get_ohlcv(start, end, '1min')
ohlcv.tail()
def weekday_candlestick(ohlc_data, ax, fmt='%b %d', freq=7, **kwargs):
    """Wrapper function for matplotlib.finance.candlestick_ohlc
    that artificially spaces data to avoid gaps from weekends or
    other periods without data"""
    # No volume overlay for this one
    # NOTE(review): this drops the caller's 'volume' column in place —
    # confirm callers do not need `ohlc_data` afterwards.
    del ohlc_data['volume']
    # Convert data to numpy array
    ohlc_data_arr = np.array(ohlc_data)
    #print(ohlc_data_arr[:,1:]) #open, high, low, close
    #print(ohlc_data_arr[:,0:]) #time
    # Replace real timestamps with consecutive integers so weekends/holidays
    # leave no gaps on the x axis.
    ohlc_data_arr2 = np.hstack(
        [np.arange(ohlc_data_arr[:,0].size)[:,np.newaxis], ohlc_data_arr[:,1:]])
    ndays = ohlc_data_arr2[:,0]  # array([0, 1, 2, ... n-2, n-1, n])
    dates = ohlc_data_arr[:,0]
    date_strings = []
    for date in dates:
        date_strings.append(date.strftime(fmt))
    # Plot candlestick chart
    candlestick_ohlc(ax, ohlc_data_arr2, **kwargs)
    # Format x axis
    # Label every `freq`-th candle with its real date string.
    ax.set_xticks(ndays[::freq])
    ax.set_xticklabels(date_strings[::freq], rotation=45, ha='right')
    #ax.set_xlim(ndays.min(), ndays.max())
    # Adding axis right hand side
    ax.tick_params(labeltop=False, labelright=True)
    # Show grid
    plt.grid()
    plt.show()
# Get data
ohlcv = foe.get_ohlcv(start, end, '30min')
fig, ax = plt.subplots(ncols=1)
# Calculate frequency, assumes all days are same number.
# Note that spacing of xtick can be borked if not.
# freq = number of 30-min bars on the most recent trading day.
freq = ohlcv[ohlcv.time > ohlcv["time"].map(lambda t: t.date()).unique()[-1]].shape[0]
weekday_candlestick(ohlcv, ax=ax, fmt='%b %d %H:%M', freq=freq, width=0.5, colorup='g', colordown='r')
fig.tight_layout()
positions = foe.get_positions(start, end)
positions.head()
# Plotting without gaps
fig, ax = plt.subplots()
ax.plot(range(positions.ask.size), positions.ask)
ax.plot(range(positions.bid.size), positions.bid)
ax.set_xticklabels(positions.index.date.tolist())
# Adding axis right hand side
ax.tick_params(labeltop=False, labelright=True)
fig.autofmt_xdate()
plt.grid()
# Export to csv
#positions.to_csv('foe.csv', sep=',', encoding='utf-8', columns=['time','bid', 'ask'], index=False)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multi just calls dl.get_trades() and merges the data. Netfonds makes 20 days including today available, hence some days are not trading days (weekends, holidays) and the number of days might be less than expected.
Step2: Resampling multiple intraday trading data as OHLCV
Step4: Plotting multiple OHLCV intraday data
Step5: Positions
|
10,377
|
<ASSISTANT_TASK:>
Python Code:
!pip install hyperas
# Basic compuational libaries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
%matplotlib inline
np.random.seed(2)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from sklearn.model_selection import KFold
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.layers import Dense, Dropout, Conv2D, GlobalAveragePooling2D, Flatten, GlobalMaxPooling2D
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.models import Sequential
from keras.optimizers import RMSprop, Adam, SGD, Nadam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras import regularizers
# Import hyperopt for tunning hyper params
from hyperopt import hp, tpe, fmin
from hyperopt import space_eval
sns.set(style='white', context='notebook', palette='deep')
# Set the random seed
random_seed = 2
def data():
    """Load the Kaggle digit-recognizer CSVs and return
    (X_train, Y_train, test): pixel arrays scaled to [0, 1] and reshaped to
    (n, 28, 28, 1), with training labels one-hot encoded over 10 classes."""
    train_frame = pd.read_csv("../input/digit-recognizer/train.csv")
    test_frame = pd.read_csv("../input/digit-recognizer/test.csv")
    labels = train_frame["label"]
    pixels = train_frame.drop(labels=["label"], axis=1)
    # Scale 0-255 grayscale to [0, 1] and add a single channel dimension.
    X_train = (pixels / 255.0).values.reshape(-1, 28, 28, 1)
    test = (test_frame / 255.0).values.reshape(-1, 28, 28, 1)
    # One-hot encode labels (ex: 2 -> [0,0,1,0,0,0,0,0,0,0]).
    Y_train = to_categorical(labels, num_classes=10)
    return X_train, Y_train, test
X, Y, X_test = data()
# Class balance of the training labels.
g = sns.countplot(np.argmax(Y, axis=1))
# Preview the first 9 digits in a 3x3 grid.
for i in range(0, 9):
    plt.subplot(330 + (i+1))
    plt.imshow(X[i][:,:,0], cmap=plt.get_cmap('gray'))
    plt.title(np.argmax(Y[i]));
    plt.axis('off')
plt.tight_layout()
epochs = 30 # Turn epochs to 30 to get 0.9967 accuracy
batch_size = 64
# With data augmentation to prevent overfitting (accuracy 0.99286)
train_aug = ImageDataGenerator(
    rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range = 0.1, # Randomly zoom image
    width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
)
test_aug = ImageDataGenerator()
# Set the CNN model
def train_model(train_generator, valid_generator, params):
    """Build a CNN from the hyperparameter dict `params`, train it on
    `train_generator` (with LR reduction + early stopping on val_loss),
    and return (validation_accuracy, model, history).

    Uses the module-level `epochs` for the maximum number of epochs.
    NOTE(review): the second pooling layer reuses params['pooling_size_1'];
    'pooling_size_2' from the search space is never used — presumably a
    typo, confirm before changing.
    """
    model = Sequential()
    # Block 1: two convs + max-pool + dropout.
    model.add(Conv2D(filters = params['conv1'], kernel_size = params['kernel_size_1'], padding = 'Same',
                     activation ='relu', input_shape = (28,28,1)))
    model.add(BatchNormalization())
    model.add(Conv2D(filters = params['conv2'], kernel_size = params['kernel_size_2'], padding = 'Same',
                     activation ='relu'))
    model.add(MaxPool2D(pool_size = params['pooling_size_1']))
    model.add(Dropout(params['dropout1']))
    model.add(BatchNormalization())
    # Block 2: two convs + max-pool + dropout.
    model.add(Conv2D(filters = params['conv3'], kernel_size = params['kernel_size_3'], padding = 'Same',
                     activation ='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(filters = params['conv4'], kernel_size = params['kernel_size_4'], padding = 'Same',
                     activation ='relu'))
    model.add(MaxPool2D(pool_size = params['pooling_size_1'], strides=(2,2)))
    model.add(Dropout(params['dropout2']))
    # Classifier head.
    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(params['dense1'], activation = "relu"))
    model.add(Dropout(params['dropout3']))
    model.add(Dense(10, activation = "softmax"))
    # Optimizer chosen by name; anything unrecognized falls back to Adam.
    if params['opt'] == 'rmsprop':
        opt = RMSprop()
    elif params['opt'] == 'sgd':
        opt = SGD()
    elif params['opt'] == 'nadam':
        opt = Nadam()
    else:
        opt = Adam()
    model.compile(loss=params['loss'], optimizer=opt, metrics=['acc'])
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=2, mode='auto', cooldown=2, min_lr=1e-7)
    early = EarlyStopping(monitor='val_loss', patience=3)
    callbacks_list = [reduce_lr, early]
    history = model.fit_generator(train_generator,
                                  validation_data=valid_generator,
                                  steps_per_epoch=len(train_generator),
                                  validation_steps=len(valid_generator),
                                  callbacks=callbacks_list, epochs = epochs,
                                  verbose=2)
    score, acc = model.evaluate_generator(valid_generator, steps=len(valid_generator), verbose=0)
    return acc, model, history
#This is the space of hyperparameters that we will search
space = {
    'opt':hp.choice('opt', ['adam', 'sgd', 'rmsprop']),
    'conv1':hp.choice('conv1', [16, 32, 64, 128]),
    'conv2':hp.choice('conv2', [16, 32, 64, 128]),
    'kernel_size_1': hp.choice('kernel_size_1', [3, 5]),
    'kernel_size_2': hp.choice('kernel_size_2', [3, 5]),
    'dropout1': hp.choice('dropout1', [0, 0.25, 0.5]),
    'pooling_size_1': hp.choice('pooling_size_1', [2, 3]),
    'conv3':hp.choice('conv3', [32, 64, 128, 256, 512]),
    'conv4':hp.choice('conv4', [32, 64, 128, 256, 512]),
    'kernel_size_3': hp.choice('kernel_size_3', [3, 5]),
    'kernel_size_4': hp.choice('kernel_size_4', [3, 5]),
    'dropout2':hp.choice('dropout2', [0, 0.25, 0.5]),
    'pooling_size_2': hp.choice('pooling_size_2', [2, 3]),
    'dense1':hp.choice('dense1', [128, 256, 512, 1024]),
    'dropout3':hp.choice('dropout3', [0, 0.25, 0.5]),
    'loss': hp.choice('loss', ['categorical_crossentropy', 'kullback_leibler_divergence']),
}
# Hold out 20% of the training data for validation.
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size = 0.2, random_state=random_seed)
# only apply data augmentation with train data
train_gen = train_aug.flow(X_train, Y_train, batch_size=batch_size)
valid_gen = test_aug.flow(X_val, Y_val, batch_size=batch_size)
def optimize(params):
    """Objective for hyperopt: train with `params` on the module-level
    generators and return the negated validation accuracy (hyperopt
    minimizes)."""
    accuracy, _model, _history = train_model(train_gen, valid_gen, params)
    return -accuracy
# Run Tree-structured Parzen Estimator search over the space.
best = fmin(fn = optimize, space = space,
            algo = tpe.suggest, max_evals = 50) # change to 50 to search more
best_params = space_eval(space, best)
print('best hyper params: \n', best_params)
acc, model, history = train_model(train_gen, valid_gen, best_params)
print("validation accuracy: {}".format(acc))
# Compare optimizers with the otherwise-best hyperparameters.
# NOTE(review): `params = best_params` aliases (does not copy), so these
# loops mutate best_params['opt'] / ['loss'] in place.
optimizers = ['rmsprop', 'sgd', 'adam']
hists = []
params = best_params
for optimizer in optimizers:
    params['opt'] = optimizer
    print("Train with optimizer: {}".format(optimizer))
    _, _, history = train_model(train_gen, valid_gen, params)
    hists.append((optimizer, history))
for name, history in hists:
    plt.plot(history.history['val_acc'], label=name)
plt.legend(loc='best', shadow=True)
plt.tight_layout()
# Compare loss functions the same way.
loss_functions = ['categorical_crossentropy', 'kullback_leibler_divergence']
hists = []
params = best_params
for loss_funct in loss_functions:
    params['loss'] = loss_funct
    print("Train with loss function : {}".format(loss_funct))
    _, _, history = train_model(train_gen, valid_gen, params)
    hists.append((loss_funct, history))
for name, history in hists:
    plt.plot(history.history['val_acc'], label=name)
plt.legend(loc='best', shadow=True)
plt.tight_layout()
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # Normalize *before* drawing, so the heatmap colors and the per-cell
    # text agree.  (Previously normalization happened after imshow(), so the
    # image showed raw counts while the text showed row-normalized rates.)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Write each cell value in a color that contrasts with the background.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Predict the values from the validation dataset
Y_pred = model.predict(X_val)
# Convert predictions classes to one hot vectors
Y_pred_classes = np.argmax(Y_pred,axis = 1)
# Convert validation observations to one hot vectors
Y_true = np.argmax(Y_val,axis = 1)
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes = range(10))
# Display some error results
# Errors are difference between predicted labels and true labels
# Boolean mask of misclassified validation samples.
errors = (Y_pred_classes - Y_true != 0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors]
def display_errors(errors_index,img_errors,pred_errors, obs_errors):
    """Show six misclassified images with their predicted and true labels.

    Parameters
    ----------
    errors_index : sequence of int
        Indices (into the error arrays below) of the examples to show;
        the first six entries are used, laid out on a 2x3 grid.
    img_errors : array
        Misclassified images, 784 pixels each (reshaped to 28x28).
    pred_errors : array
        Predicted labels for the misclassified examples.
    obs_errors : array
        True (observed) labels for the misclassified examples.
    """
    n = 0
    nrows = 2
    ncols = 3
    fig, ax = plt.subplots(nrows, ncols, sharex=True, sharey=True)
    for row in range(nrows):
        for col in range(ncols):
            error = errors_index[n]
            ax[row, col].imshow((img_errors[error]).reshape((28, 28)))
            ax[row, col].set_title("Predicted label :{}\nTrue label :{}".format(pred_errors[error], obs_errors[error]))
            n += 1
    fig.tight_layout()
# Probabilities assigned to the (wrong) predicted labels.
Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1)
# Probabilities the model assigned to the *true* labels of the errors.
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
# Confidence gap: predicted-label probability minus true-label probability.
# Large values mean the model was confidently wrong.
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# Indices of errors sorted by increasing confidence gap.
sorted_dela_errors = np.argsort(delta_pred_true_errors)
# The six most confidently-wrong predictions (largest gap).
most_important_errors = sorted_dela_errors[-6:]
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)
# 5-fold cross-validation ensemble: train one model per fold and average
# the test-set class probabilities across folds.
kf = KFold(n_splits=5)
preds = []
for train_index, valid_index in kf.split(X):
    X_train, Y_train, X_val, Y_val = X[train_index], Y[train_index], X[valid_index], Y[valid_index]
    train_gen = train_aug.flow(X_train, Y_train, batch_size=batch_size)
    valid_gen = test_aug.flow(X_val, Y_val, batch_size=batch_size)
    acc, model, history = train_model(train_gen, valid_gen, best_params)
    pred = model.predict(X_test)
    preds.append(pred)
# Average the per-fold probability predictions.
results = np.mean(preds, axis=0)
# select the index with the maximum probability
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
# ImageId is 1-based; derive its length from the predictions instead of
# hard-coding 28000 so the cell works for any test-set size.
submission = pd.concat([pd.Series(range(1, len(results) + 1), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_datagen.csv", index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Tiền xử lý
Step2: Kiểm tra phân bố của nhãn
Step3: Thử nhìn qua một số mẫu trong tập huấn luyện. Chúng ta thấy rằng hầu hết các ảnh đều rõ nét và tương đối dễ dàng để nhận dạng.
Step4: Định nghĩa số epochs cần huấn luyện và batch size
Step5: 3. Data Augmentation
Step6: 4. Xây dưng mô hình CNN
Step7: 5. Hyper-params tunning
Step8: 5.2 Optimze để tìm bộ tham số tốt nhất
Step9: Chạy quá trình search tham số. Bộ siêu tham số tốt nhất sẽ được ghi nhận lại để chúng ta sử dụng trong mô hình cuối cùng.
Step10: Huấn luyện lại mô hình với bộ tham số tốt nhất ở trên.
Step11: Kết quả trên tập validation khá cao với acc > 99%
Step12: Plot quá trình huấn luyện mô hình với 3 lọai optimizers khác nhau.
Step13: 6.2 So sánh các loss function
Step14: Plot quá trình huấn luyện mô hình với 2 loại loss function khác nhau.
Step16: Chúng ta thấy rằng không có sự khác biệt rõ ràng về tốc độ hội tụ giữa 2 hàm loss function là cross-entropy và KL loss trong bài toán của chúng ta.
Step18: Các giá trị trên đường chéo rất cao, chúng ta mô hình chúng ta có độ chính xác rất tốt.
Step19: Với các mẫu ảnh sai, chúng ta có thể thấy rằng những mẫu này rất khó nhận dạng, dễ nhầm lẫn sang các lớp khác, ví dụ số 9 và 4 hay là 3 và 8
|
10,378
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
#%config InlineBackend.figure_format = 'pdf'
import freqopttest.util as util
import freqopttest.data as data
import freqopttest.ex.exglobal as exglo
import freqopttest.kernel as kernel
import freqopttest.tst as tst
import freqopttest.glo as glo
import freqopttest.plot as plot
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import scipy.signal as sig
import sys
# font options
# Global matplotlib styling for this notebook: larger text and thicker
# lines so the figures read well inline.
font = {
    #'family' : 'normal',
    #'weight' : 'bold',
    'size' : 18
}
plt.rc('font', **font)
plt.rc('lines', linewidth=2)
class SSMix2Unif(data.SampleSource):
    """1-d two-sample problem built from uniform distributions.

    P: U(m_p - w_p/2, m_p + w_p/2) where w_p is the width of the uniform
    distribution and m_p is its mean.
    Q: w*U(m_q - w_q/2, m_q + w_q/2) + (1-w)*p(x) where p(x) is the
    density of P.

    (The original cleaned source had these docstrings with their quotes
    stripped, which made the class a syntax error; restored here.)
    """

    def __init__(self, w, mp, wp, mq, wq):
        if not (w >= 0 and w <= 1):
            raise RuntimeError('w must be in [0, 1]')
        self.w = w    # mixture weight of Q's narrow component
        self.mp = mp  # mean of P's uniform
        self.wp = wp  # width of P's uniform
        self.mq = mq  # mean of Q's narrow uniform component
        self.wq = wq  # width of Q's narrow uniform component

    def dim(self):
        return 1

    def sample(self, n, seed):
        """Draw n samples from P (X) and n from Q (Y) as a TSTData."""
        rstate = np.random.get_state()
        np.random.seed(seed)
        w = self.w
        mp = self.mp
        wp = self.wp
        mq = self.mq
        wq = self.wq
        # Component indicator for Q: 0 -> narrow bump (prob w),
        # 1 -> P's own uniform (prob 1-w).
        disc_var = stats.rv_discrete(values=([0, 1], [w, 1-w]))
        ind = disc_var.rvs(size=n)
        ind0 = ind == 0
        ind1 = ind == 1
        # draw from Q: narrow component for ind0, wide component for ind1
        ys = stats.uniform.rvs(loc=mq-wq/2.0, scale=wq, size=np.sum(ind0))
        yb = stats.uniform.rvs(loc=mp-wp/2.0, scale=wp, size=np.sum(ind1))
        y = np.hstack((ys, yb))
        y = y[:, np.newaxis]
        # draw from P
        x = stats.uniform.rvs(loc=mp-wp/2.0, scale=wp, size=n)
        x = x[:, np.newaxis]
        # Restore the global RNG state so seeding here does not disturb
        # the caller's randomness.
        np.random.set_state(rstate)
        return data.TSTData(x, y, label='mix2unif')
# sample source
n = 4000
alpha = 0.01  # test significance level
w = 0.4       # mixture weight of Q's narrow component
seed = 43
prob_params = {'w': w, 'mp': 25, 'wp': 5, 'mq': 0, 'wq': 0.2}
ss = SSMix2Unif(**prob_params)
tst_data = ss.sample(n, seed=seed)
# Independent train/test splits via different seeds.
tr = ss.sample(n//2, seed=seed)
te = ss.sample(n//2, seed=seed+1)
#tr, te = tst_data.split_tr_te(tr_proportion=0.5, seed=10)
nte = te.X.shape[0]
xtr, ytr = tr.xy()
xytr = tr.stack_xy()
# Histogram X and Y training samples on a shared binning.
bins = np.linspace(np.min(xytr), np.max(xytr), 30)
normed_hist = True
# NOTE(review): `normed=` was removed from plt.hist in matplotlib >= 3.1
# (use `density=` on modern versions) — confirm the pinned matplotlib.
plt.hist(xtr, bins, label='X', alpha=0.5, normed=normed_hist)
plt.hist(ytr, bins, label='Y', alpha=0.5, normed=normed_hist)
plt.legend(loc='best')
alpha = 0.01  # significance level used by the ME tests below
nte = 400
rep = 400     # number of repeated trials per configuration
# repeat many trials to see the value of the optimized location
def two_locations_test_results(nte, k):
    """Run `rep` mean-embedding (ME) tests at two fixed test locations.

    One location is placed at P's bump mean (mp), the other at Q's
    narrow-bump mean (mq). Uses the globals ss, prob_params, alpha, rep.

    Parameters
    ----------
    nte : int
        Test sample size drawn per trial.
    k : freqopttest Kernel
        Kernel used by the ME test.

    Returns
    -------
    (Tp_results, Tq_results) : lists of per-trial result dicts for the
        location at mp and at mq, respectively.
    """
    Tp_results = []
    Tq_results = []
    shift_seed = 1000
    for r in range(shift_seed, shift_seed + rep):
        te = ss.sample(nte, seed=r+1)
        # test loc at the small bump
        Tq = np.array([[prob_params['mq']]])
        # test loc at the big bump
        Tp = np.array([[prob_params['mp']]])
        # actual test
        q_met = tst.METest(Tq, k, alpha)
        Tq_results.append(q_met.perform_test(te))
        p_met = tst.METest(Tp, k, alpha)
        Tp_results.append(p_met.perform_test(te))
    return Tp_results, Tq_results
def prob_tq_better(nte, k):
    """Estimate how often the small-bump location beats the big-bump one.

    Parameters
    ----------
    nte : int
        Test sample size per trial.
    k : freqopttest Kernel
        Kernel used by the ME test.

    Returns
    -------
    (frac, tp_lambs, tq_lambs) : fraction of the `rep` trials in which
        the statistic at mq exceeds the one at mp, plus the per-trial
        statistics at mp and mq respectively.
    """
    Tp_results, Tq_results = two_locations_test_results(nte, k)
    tp_lambs = np.array([r['test_stat'] for r in Tp_results])
    tq_lambs = np.array([r['test_stat'] for r in Tq_results])
    n_left_high = np.sum(tq_lambs > tp_lambs)
    return float(n_left_high)/rep, tp_lambs, tq_lambs
def plot_tup_stats(tup_ntes):
    """Plot summary statistics of the two-location experiments.

    First figure: fraction of trials in which the small-bump location
    gives the larger statistic, versus sample size. Second figure: mean
    +/- std of the statistic at each location. X-axis uses the global
    `ntes`.
    """
    # Each entry of tup_ntes is (win_fraction, stats_at_mp, stats_at_mq).
    win_fracs, tp_stat_lists, tq_stat_lists = zip(*tup_ntes)
    plt.plot(ntes, np.array(win_fracs), 'ob-')
    plt.xlabel('nte')
    plt.ylabel('p(loc at small bump gives high $\hat{\lambda}_n$)')
    plt.figure()
    tp_lamb_means = np.array([np.mean(s) for s in tp_stat_lists])
    tp_lamb_stds = np.array([np.std(s) for s in tp_stat_lists])
    tq_lamb_means = np.array([np.mean(s) for s in tq_stat_lists])
    tq_lamb_stds = np.array([np.std(s) for s in tq_stat_lists])
    print('mq is the mean of the small bump')
    plt.errorbar(ntes, tp_lamb_means, tp_lamb_stds,
                 label=r'$\hat{\mathbb{E}}[\hat{\lambda}_n \mid \mathrm{at }\,\, m_p]$')
    plt.errorbar(ntes, tq_lamb_means, tq_lamb_stds,
                 label=r'$\hat{\mathbb{E}}[\hat{\lambda}_n \mid \mathrm{at }\,\, m_q]$')
    plt.xlabel('test sample size')
    plt.ylabel('$\hat{\lambda}_n$')
    plt.legend(loc='best')
# Sweep of test sample sizes for the triangular-kernel experiment.
ntes = np.linspace(5, 200, num=7, dtype=np.int64)
#ntes = np.array([ 25, 50, 75, 100, 150, 200, 250])
tri_width = 3
ktri = kernel.KTriangle(tri_width)
ktri_tup_ntes = [prob_tq_better(nte, ktri) for nte in ntes]
plot_tup_stats(ktri_tup_ntes)
# NOTE(review): the four arrays below are computed but never used in
# this chunk — likely leftovers from before plot_tup_stats existed.
tp_lamb_means = np.array([np.mean(l) for l in [t[1] for t in ktri_tup_ntes]])
tp_lamb_stds = np.array([np.std(l) for l in [t[1] for t in ktri_tup_ntes]])
tq_lamb_means = np.array([np.mean(l) for l in [t[2] for t in ktri_tup_ntes]])
tq_lamb_stds = np.array([np.std(l) for l in [t[2] for t in ktri_tup_ntes]])
# Same experiment with a Gaussian kernel; fewer repetitions (rep=300).
alpha = 0.01
nte = 400
rep = 300
#ntes = np.linspace(50, 300, num=7, dtype=np.int64)
ntes = np.array([ 25, 50, 75, 100, 150, 200, 250])
#gwidth0 = 1
# Bandwidth from the median heuristic: squared median pairwise distance
# on a pilot sample.
med_data = ss.sample(1000, seed=10)
gwidth0 = util.meddistance(med_data.stack_xy(), subsample=1000)**2
kgauss = kernel.KGauss(gwidth0)
tup_ntes = [prob_tq_better(nte, kgauss) for nte in ntes]
plot_tup_stats(tup_ntes)
# Plot B-spline basis functions of orders 0-2 on [-3, 3].
dom = np.linspace(-3, 3, 200)
for order in [0, 1, 2]:
    # NOTE(review): scipy.signal.bspline was deprecated and later removed
    # in recent SciPy — confirm the pinned SciPy version still provides it.
    bspline_vals = sig.bspline(dom, order)
    plt.plot(dom, bspline_vals, label='order=%d'%order)
plt.legend()
plt.title('B-spline')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This notebook investigates the stability of the learned test location. We consider the case where P is a mixture of two uniform distributions on 1D, one of which has small height, and the second component has a much larger height. Here Q is a uniform distribution whose mass strongly overlaps with the second component of P.
Step4: Check stability of the two modes. Triangular kernel
Step5: Check stability of the two modes. Gaussian kernel
Step6: test B-spline
|
10,379
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# Documentation output for the UKESM1-0-LL atmospheric-chemistry
# (atmoschem) realm, MOHC institute, CMIP6 project.
DOC = NotebookOutput('cmip6', 'mohc', 'ukesm1-0-ll', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
10,380
|
<ASSISTANT_TASK:>
Python Code:
import arviz as az
import numpy as np
import emcee
# Use the ArviZ dark-grid style for every figure in this notebook.
az.style.use("arviz-darkgrid")
# Eight-schools data: J schools with observed treatment effects (y_obs)
# and their standard errors (sigma) — a classic hierarchical-model benchmark.
J = 8
y_obs = np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])
sigma = np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])
def log_prior_8school(theta):
    """Unnormalized log prior for the 8-schools parameter vector.

    ``theta`` is laid out as [mu, tau, eta_0, ..., eta_{J-1}]. tau gets a
    half-Cauchy prior (hwhm=25), mu a Normal(0, 10) prior and each eta a
    standard-normal prior. Negative tau is outside the support.
    """
    mu = theta[0]
    tau = theta[1]
    eta = theta[2:]
    if tau < 0:
        # Half-Cauchy support is tau >= 0: zero prior mass below.
        return -np.inf
    lp_tau = -np.log(tau * tau + 25 ** 2)
    lp_mu = -((mu / 10) ** 2)
    lp_eta = -np.sum(eta * eta)
    return lp_mu + lp_tau + lp_eta
def log_likelihood_8school(theta, y, s):
    """Per-school unnormalized Gaussian log likelihood.

    Returns one value per school: -(z_i)**2 where
    z_i = (mu + tau * eta_i - y_i) / s_i.
    """
    mu, tau = theta[0], theta[1]
    eta = theta[2:]
    resid = (mu + tau * eta - y) / s
    return -(resid ** 2)
def lnprob_8school(theta, y, s):
    """Log posterior density (up to a constant): prior + summed likelihood."""
    total_like = np.sum(log_likelihood_8school(theta, y, s))
    return total_like + log_prior_8school(theta)
nwalkers = 40 # called chains in ArviZ
# Dimensionality: mu, tau plus one eta per school.
ndim = J + 2
draws = 1500
# Start every walker from a standard-normal draw...
pos = np.random.normal(size=(nwalkers, ndim))
# ...but tau (column 1) must be non-negative under the half-Cauchy prior.
pos[:, 1] = np.absolute(pos[:, 1])
sampler = emcee.EnsembleSampler(
    nwalkers,
    ndim,
    lnprob_8school,
    args=(y_obs, sigma)
)
sampler.run_mcmc(pos, draws);
# define variable names, it cannot be inferred from emcee
var_names = ["mu", "tau"] + ["eta{}".format(i) for i in range(J)]
idata1 = az.from_emcee(sampler, var_names=var_names)
idata1
# Drop the first 100 draws as burn-in.
idata1.sel(draw=slice(100, None))
az.plot_posterior(idata1, var_names=["mu", "tau", "eta4"])
# Alternative: use `slices` to group the eta components into a single
# multidimensional variable instead of eta0..eta7.
idata2 = az.from_emcee(sampler, slices=[0, 1, slice(2, None)])
idata2
az.plot_trace(idata2, var_names=["var_2"], coords={"var_2_dim_0": 4});
def lnprob_8school_blobs(theta, y, s):
    """Log posterior plus the per-school log likelihood returned as an emcee blob."""
    pointwise = log_likelihood_8school(theta, y, s)
    log_post = log_prior_8school(theta) + np.sum(pointwise)
    return log_post, pointwise
sampler_blobs = emcee.EnsembleSampler(
    nwalkers,
    ndim,
    lnprob_8school_blobs,
    args=(y_obs, sigma),
)
sampler_blobs.run_mcmc(pos, draws);
# Map both the eta vector and the per-school log likelihood onto a shared
# "school" dimension so ArviZ labels them consistently.
dims = {"eta": ["school"], "log_likelihood": ["school"]}
idata3 = az.from_emcee(
    sampler_blobs,
    var_names = ["mu", "tau", "eta"],
    slices=[0, 1, slice(2,None)],
    blob_names=["log_likelihood"],
    dims=dims,
    coords={"school": range(8)}
)
idata3
# Peek at one raw blob from the sampler's (step, walker) blob store.
sampler_blobs.blobs[0, 1]
def lnprob_8school_blobs(theta, y, sigma):
    """Log posterior plus two blobs per draw: the pointwise log likelihood
    and one posterior-predictive sample for each school."""
    mu, tau, eta = theta[0], theta[1], theta[2:]
    prior = log_prior_8school(theta)
    like_vect = log_likelihood_8school(theta, y, sigma)
    like = np.sum(like_vect)
    # store pointwise log likelihood, useful for model comparison with az.loo or az.waic
    # and posterior predictive samples as blobs
    return like + prior, (like_vect, np.random.normal((mu + tau * eta), sigma))
sampler_blobs = emcee.EnsembleSampler(
    nwalkers,
    ndim,
    lnprob_8school_blobs,
    args=(y_obs, sigma),
)
sampler_blobs.run_mcmc(pos, draws);
# All three per-school quantities share the "school" dimension.
dims = {"eta": ["school"], "log_likelihood": ["school"], "y": ["school"]}
idata4 = az.from_emcee(
    sampler_blobs,
    var_names = ["mu", "tau", "eta"],
    slices=[0, 1, slice(2,None)],
    # Route the sampler args into named InferenceData groups: y is the
    # observed data, sigma a constant.
    arg_names=["y","sigma"],
    arg_groups=["observed_data", "constant_data"],
    # Route the two blobs into their own groups as well.
    blob_names=["log_likelihood", "y"],
    blob_groups=["log_likelihood", "posterior_predictive"],
    dims=dims,
    coords={"school": range(8)}
)
idata4
# Posterior predictive check: overlay predictive draws on the observed y.
az.plot_ppc(idata4, var_names=["y"], alpha=0.3, num_pp_samples=200);
%load_ext watermark
%watermark -n -u -v -iv -w
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Manually set variable names
Step2: ArviZ has stored the posterior variables with the provided names as expected, but it has also included other useful information in the InferenceData object. The log probability of each sample is stored in the sample_stats group under the name lp and all the arguments passed to the sampler as args have been saved in the observed_data group.
Step3: From an InferenceData object, ArviZ's native data structure, the {func}posterior plot <arviz.plot_posterior> of a few variables can be done in one line
Step4: Structuring the posterior as multidimensional variables
Step5: After checking the default variable names, the trace of one dimension of eta can be plotted using ArviZ syntax
Step6: blobs
Step7: You can now use the blob_names argument to indicate how to store this blob-defined variable. As the group is not specified, it will go to sample_stats.
Step8: Multi-group blobs
Step9: This last version, which contains both observed data and posterior predictive can be used to plot posterior predictive checks
|
10,381
|
<ASSISTANT_TASK:>
Python Code:
5 / 2
from sympy import Rational
# Exact rational 5/2 instead of a floating-point quotient.
Rational(5,2)
from sympy import S
# S is sympy's "sympify" shortcut; `S?` displays its IPython help.
S?
type(S(5))
# Exact symbolic arithmetic: S(5)/2 stays a Rational.
S(5)/2
S('13/2') + S(5)/7
from sympy import arg,re,im,I
# Complex number 3 + 5i: real part, imaginary part, argument, modulus.
a = 3 + 5*I
re(a)
im(a)
# .n() evaluates the symbolic expression numerically.
arg(a).n()
abs(a)
from sympy import symbols
# The range syntax 'x3:8' creates the symbols x3, x4, x5, x6, x7.
x3,x4,x5,x6,x7 = symbols('x3:8')
x3 + x4+ 6 *x7
from sympy import *
init_printing(pretty_print=True, use_latex='mathjax')
#from sympy.abc import * # avoid: the wildcard import clobbers existing names
from sympy.abc import a,b,c,n,t,u,v,w,x,y,z
# Equations are built with Eq(lhs, rhs); solve() returns the solutions.
eq1 = Eq(x, 3)
eq2 = Eq(x**2+y**2, 16)
solve(eq1, x)
solve(eq2, y)
solve(Eq(x**2+2*x, 4))
# System: x + y = 34 and x*y = 34.
eq1 = Eq(x+y, 34)
eq2 = Eq(x*y, 34)
systeme = [eq1, eq2]
solve(systeme)
# System: x + y = 34 and x*y**2 = 34.
eq1 = Eq(x+y, 34)
eq2 = Eq(x*y**2, 34)
systeme = [eq1, eq2]
solve(systeme)
# Shorthand: bare expressions are implicitly equated to zero.
solve([x+y-34, x*y-34])
# roots() maps each polynomial root to its multiplicity.
roots(3*x**2+52*x-265)
r1,r2,r3 = roots(3*x**3+52*x-265)
# Numeric approximations of the three roots.
r1.n(), r2.n(), r3.n()
%matplotlib inline
f = x**2
plot(x**2, (x,-20,20))
g = (3*x**2+52*x-265) / ((x-7)*(x-1)*(x+34))
plot(g, (x,-40,10), xlim=(-40, 10), ylim=(-2,2))
# Zoom near the real roots of x**3 + 2*x**2 - 1.
plot(x**3+2*x**2-1, (x,-2,1), ylim=(-1,1))
roots(x**3+2*x**2-1)
# Plot f and g on the same axes.
plot(f, g)
from sympy.plotting import plot3d
# Bug fix: a stray bare `j` expression stood here; `j` is not defined by
# any of the imports above, so evaluating it raised a NameError. Removed.
# Saddle surface z = x**2 - y**2.
plot3d(x**2-y**2)
plot3d((x**2-y**2)*sin(x))
from sympy.plotting import plot_parametric
# Unit circle as a 2D parametric curve.
plot_parametric(sin(t), cos(t))
# "Butterfly curve" (transcendental plane curve).
x = sin(t)*(E**cos(t)-2*cos(4*t)-sin(t/12)**5)
y = cos(t)*(E**cos(t)-2*cos(4*t)-sin(t/12)**5)
plot_parametric(x, y)
from IPython.display import Image
# Reference picture of the butterfly curve from Wikimedia Commons.
url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/f/f5/Param_02.jpg/800px-Param_02.jpg'
Image(url=url, width=500)
from sympy.plotting import plot3d_parametric_line
# Circular helix: (cos u, sin u, u).
x = cos(u)
y = sin(u)
z = u
plot3d_parametric_line(x,y,z)
# Conical spiral: radius grows linearly with u.
x = u*cos(u)
y = u*sin(u)
z = u
plot3d_parametric_line(x,y,z)
from sympy.plotting import plot3d_parametric_surface
from sympy.abc import u,v
# Torus with major radius R and minor radius r.
R = 5
r = 2
X = cos(u)*(R+r*cos(v))
Y = sin(u)*(R+r*cos(v))
Z = r*sin(v)
plot3d_parametric_surface(X, Y, Z, (u, -.5, 4), (v, -5, 5))
from sympy.abc import theta,phi
from sympy import pi,cos,sin
# Unit-sphere parametrization r(theta, phi) = (cos(theta) sin(phi),
# sin(theta) sin(phi), cos(phi)), matching the formula quoted in the text.
x = cos(theta)*sin(phi)
y = sin(theta)*sin(phi)
# Bug fix: z must depend on phi (the polar angle), not theta; with
# z = cos(theta) the surface is not a sphere.
z = cos(phi)
# NOTE(review): the quoted domain is 0 <= theta < 2*pi; the range
# (theta, 0, pi) below draws only half the sphere — widen to
# (theta, 0, 2*pi) for the full surface if desired.
plot3d_parametric_surface(x,y,z,(theta,0,pi), (phi,0,pi))
from sympy import plot_implicit
# Plot the solution set of the implicit equation x**2 + y**2 + x*y - 2*x = 5.
eq = Eq(x**2+y**2+x*y-2*x, 5)
plot_implicit(eq)
#from sympy import mpmath # SymPy (standard installation)
import mpmath # SageMath
from mpmath import cplot
# Domain-coloring plots of complex functions.
cplot(lambda z:z)
cplot(lambda z:z**5-1)
from sympy.abc import x
# One-sided limit of 1/x as x -> 0 from the right.
limit(1/x, x, 0, dir='+')
# sympy's symbol for infinity.
oo
limit(1/x, x, oo)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3.2 Nombres complexes
Step2: 4.2 Définir les variables symboliques x_1, x_2, ... x_n
Step3: Initialisation
Step4: Importer quelques variables du module abc
Step5: 5 Résolution d'équations
Step6: Définir l'équation $x^2+y^2=16$
Step7: 5.2 Résoudre une équation
Step8: Résoudre l'équation $x^2+2x=4$
Step9: 5.3 Résoudre un système d'équations
Step10: Résoudre le système $x+y=34$, $xy^2=34$
Step11: 5.4 Syntaxe abrégée
Step12: 5.5 Trouver les racines d'une fonction
Step13: $$3x^3 + 52x - 265$$
Step14: 6 Tracer une fonction
Step15: 6.1 Tracer une fonction R -> R
Step16: $$g(x) = {3x^2 + 52x - 265 \over (x - 7)(x - 1)(x + 34)}$$
Step17: Approximer les racines de $$x^3 + 2x^2 - 1$$ avec un dessin
Step18: 6.2 Tracer plusieurs fonctions R -> R
Step19: 6.3 Tracer une fonction R^2 -> R
Step20: $$x^2+y^2$$
Step21: $$x^2-y^2$$
Step22: $$(x^2-y^2)\sin(x)$$
Step23: 6.4 Dessiner une fonction R -> R^2
Step24: $$x = \sin(t)$$
Step25: Butterfly curve (transcendental)
Step26: 6.5 Dessiner une fonction R -> R^3
Step27: \begin{align} x(u)&= \cos(u)\ y(u)&=\sin(u)\ z(u)&=u\end{align}
Step28: \begin{align} x(u)&= u \cos(u)\ y(u)&=\sin(u)\ z(u)&=u\end{align}
Step29: 6.6 Dessiner une fonction R^2 -> R^3
Step30: \begin{align}
Step31: $$\vec r(\theta,\phi) = (\cos\theta \sin\phi, \sin\theta \sin \phi, \cos\phi), \quad 0 \leq \theta < 2\pi, 0 \leq \phi \leq \pi.$$
Step32: 6.7 Dessiner les solutions d'une équation implicite
Step33: 6.8 Tracer une région de R^2
Step34: Tracer la fonction complexe identité $$f(z)=z$$
Step35: Tracer la fonction complexe $$f(z)=z^5-1$$
Step36: 7 Limites et séries
|
10,382
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# NOTE: tensorflow.examples.tutorials and tf.placeholder below are the
# TF 1.x graph API; this notebook will not run on TensorFlow 2.x as-is.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
    """Create the two graph inputs for the GAN.

    Returns (input_real, input_z): float32 placeholders of shape
    (batch, real_dim) for real images and (batch, z_dim) for latent
    noise; the batch dimension is left open.
    """
    real_ph = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
    noise_ph = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    return real_ph, noise_ph
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    ''' Build the generator network.
    Arguments
    ---------
    z : Input tensor for the generator (latent noise, shape (batch, z_dim))
    out_dim : Size of the generator output (flattened image length)
    n_units : Number of units in hidden layer
    reuse : Reuse the variables with tf.variable_scope
    alpha : leak parameter for leaky ReLU
    Returns
    -------
    out : tanh-activated output in (-1, 1), matching the real images,
          which are rescaled to [-1, 1] before being fed to the discriminator
    '''
    with tf.variable_scope('generator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(z, n_units)
        # Leaky ReLU: max(alpha*h, h) keeps a small gradient for negative inputs
        h1 = tf.maximum(alpha * h1, h1)
        # Logits and tanh output
        logits = tf.layers.dense(h1, out_dim)
        out = tf.nn.tanh(logits)
        return out
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    ''' Build the discriminator network.
    Arguments
    ---------
    x : Input tensor for the discriminator (flattened image batch)
    n_units: Number of units in hidden layer
    reuse : Reuse the variables with tf.variable_scope — set True on the
            second call so real and fake batches share the same weights
    alpha : leak parameter for leaky ReLU
    Returns
    -------
    out : sigmoid probability that the input is a real image
    logits : pre-sigmoid values, consumed directly by the numerically
             stable sigmoid_cross_entropy_with_logits losses below
    '''
    with tf.variable_scope('discriminator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(x, n_units)
        # Leaky ReLU
        h1 = tf.maximum(alpha * h1, h1)
        logits = tf.layers.dense(h1, 1)
        out = tf.sigmoid(logits)
        return out, logits
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing: real labels become 0.9 instead of 1.0 (see d_loss_real)
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
# Discriminator network: applied to real images, then (with reuse=True,
# i.e. shared weights) to the generated batch
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
# Calculate losses: discriminator wants real -> ~1 (smoothed) and fake -> 0
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
                                                                     labels=tf.ones_like(d_logits_real) * (1 - smooth)))
# NOTE(review): zeros_like(d_logits_real) works because the real and fake
# logits have the same shape, but zeros_like(d_logits_fake) would be clearer.
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                                     labels=tf.zeros_like(d_logits_real)))
d_loss = d_loss_real + d_loss_fake
# Generator wants the discriminator to label its fakes as real (1)
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                                labels=tf.ones_like(d_logits_fake)))
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts so each optimizer
# only updates its own network
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
batch_size = 100
epochs = 100
samples = []
losses = []
# Checkpoint only the generator variables; that is all we need for sampling later.
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for ii in range(mnist.train.num_examples//batch_size):
            batch = mnist.train.next_batch(batch_size)
            # Get images, reshape and rescale to [-1, 1] to match the
            # generator's tanh output range
            batch_images = batch[0].reshape((batch_size, 784))
            batch_images = batch_images*2 - 1
            # Sample random noise for G
            batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            # Run optimizers: one discriminator step, then one generator step
            _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
            _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
        # At the end of each epoch, get the losses and print them out
        train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
        train_loss_g = g_loss.eval({input_z: batch_z})
        print("Epoch {}/{}...".format(e+1, epochs),
              "Discriminator Loss: {:.4f}...".format(train_loss_d),
              "Generator Loss: {:.4f}".format(train_loss_g))
        # Save losses to view after training
        losses.append((train_loss_d, train_loss_g))
        # Sample from generator as we're training for viewing afterwards
        sample_z = np.random.uniform(-1, 1, size=(16, z_size))
        gen_samples = sess.run(
                       generator(input_z, input_size, reuse=True),
                       feed_dict={input_z: sample_z})
        samples.append(gen_samples)
    saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
    pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# losses is a list of (d_loss, g_loss) pairs, one per epoch;
# transpose to get one series per network.
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
    """Show up to 16 generated images from one epoch on a 4x4 grid.

    Returns the matplotlib (figure, axes) pair for further tweaking.
    """
    fig, axes = plt.subplots(figsize=(7, 7), nrows=4, ncols=4, sharey=True, sharex=True)
    for axis, flat_img in zip(axes.flatten(), samples[epoch]):
        # Hide tick marks and labels; only the images matter here.
        axis.xaxis.set_visible(False)
        axis.yaxis.set_visible(False)
        axis.imshow(flat_img.reshape((28, 28)), cmap='Greys_r')
    return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
    samples = pkl.load(f)
# Show the samples from the final epoch.
_ = view_samples(-1, samples)
# Grid of samples across training: `rows` evenly spaced epochs,
# `cols` images from each.
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
    for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
        ax.imshow(img.reshape((28,28)), cmap='Greys_r')
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
# Restore the saved generator weights and draw fresh samples.
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    sample_z = np.random.uniform(-1, 1, size=(16, z_size))
    gen_samples = sess.run(
                   generator(input_z, input_size, reuse=True),
                   feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number-like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
|
10,383
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
# antipackage enables importing modules directly from GitHub paths,
# which is how the `github.ellisonbg...` import below resolves.
import antipackage
import github.ellisonbg.misc.vizarray as va
def checkerboard(size):
    """Return a ``size`` x ``size`` float array containing a checkerboard
    of 1.0 and 0.0 values, with element (0, 0) equal to 1.0.
    """
    # np.ones replaces the original np.empty + fill(1.0) two-step.
    board = np.ones((size, size), dtype=float)
    board[1::2, ::2] = 0.0  # odd rows, even columns
    board[::2, 1::2] = 0.0  # even rows, odd columns
    return board
    # Bug fix: the original had `va.vizarray(Z)` AFTER the return — it was
    # unreachable dead code (the cell boundary was lost when flattening the
    # notebook) and has been removed; call va.vizarray on the result instead.
checkerboard(4)
# Sanity checks on a 4x4 board: half ones, (0, 0) is 1.0, rows alternate.
a = checkerboard(4)
assert a[0,0]==1.0
assert a.sum()==8.0
assert a.dtype==np.dtype(float)
assert np.all(a[0,0:5:2]==1.0)
assert np.all(a[1,0:5:2]==0.0)
# Odd-sized board: 13 ones out of 25, and the flattened array alternates.
b = checkerboard(5)
assert b[0,0]==1.0
assert b.sum()==13.0
assert np.all(b.ravel()[0:26:2]==1.0)
assert np.all(b.ravel()[1:25:2]==0.0)
# Visualize with vizarray: 20x20 board drawn with 10px blocks...
va.enable()
va.set_block_size(10)
checkerboard(20)
assert True
# ...and a 27x27 board with 5px blocks.
va.set_block_size(5)
checkerboard(27)
assert True
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Checkerboard
Step2: Use vizarray to visualize a checkerboard of size=20 with a block size of 10px.
Step3: Use vizarray to visualize a checkerboard of size=27 with a block size of 5px.
|
10,384
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# ES-DOC output document for the CMIP6 'land' realm of IPSL's 'sandbox-1'
# model; the DOC object collects every property value set in the cells below.
DOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-1', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
10,385
|
<ASSISTANT_TASK:>
Python Code:
def backward_differencing(f, x_i, dx):
    """First-order backward difference approximation of f'(x_i).

    The docstring quotes were missing in the original, leaving a bare
    expression that is a syntax error; restored here.

    Args:
        f: callable evaluated at scalar points.
        x_i: point at which to approximate the derivative.
        dx: grid spacing; truncation error is O(dx).

    Returns:
        (f(x_i) - f(x_i - dx)) / dx.
    """
    f_i = f(x_i)
    f_i_minus_1 = f(x_i - dx)
    return (f_i - f_i_minus_1) / dx
def forward_differencing(f, x_i, dx):
    """First-order forward difference approximation of f'(x_i).

    The docstring quotes were missing in the original, leaving a bare
    expression that is a syntax error; restored here.

    Args:
        f: callable evaluated at scalar points.
        x_i: point at which to approximate the derivative.
        dx: grid spacing; truncation error is O(dx).

    Returns:
        (f(x_i + dx) - f(x_i)) / dx.
    """
    f_i = f(x_i)
    f_i_plus_1 = f(x_i + dx)
    return (f_i_plus_1 - f_i) / dx
def central_differencing(f, x_i, dx):
    """Second-order central difference approximations at x_i.

    The docstring quotes were missing in the original, leaving a bare
    expression that is a syntax error; restored here.

    Args:
        f: callable evaluated at scalar points.
        x_i: point at which to approximate the derivatives.
        dx: grid spacing; truncation error is O(dx**2) for both results.

    Returns:
        Tuple (first_derivative, second_derivative) using the standard
        three-point central stencils.
    """
    f_i = f(x_i)
    f_i_minus_1 = f(x_i - dx)
    f_i_plus_1 = f(x_i + dx)
    first_derivative = (f_i_plus_1 - f_i_minus_1) / (2.0 * dx)
    second_derivative = (f_i_minus_1 - 2.0 * f_i + f_i_plus_1) / (dx**2)
    return first_derivative, second_derivative
import numpy

# Exercise every scheme on f(x) = exp(x) at x = 0, where the exact first
# and second derivatives are both 1, first on a coarse grid and then on a
# ten-times finer one.
for dx in (1.0, 0.1):
    bd = backward_differencing(numpy.exp, 0.0, dx=dx)
    fd = forward_differencing(numpy.exp, 0.0, dx=dx)
    cd1, cd2 = central_differencing(numpy.exp, 0.0, dx=dx)
    print("Backward difference should be 1, is {}, error {}".format(bd, abs(bd - 1.0)))
    print("Forward difference should be 1, is {}, error {}".format(fd, abs(fd - 1.0)))
    print("Central difference (1st derivative) should be 1, is {}, error {}".format(cd1, abs(cd1 - 1.0)))
    print("Central difference (2nd derivative) should be 1, is {}, error {}".format(cd2, abs(cd2 - 1.0)))
# Convergence study: measure the error of each finite-difference scheme on
# f(x) = exp(x) at x = 0 (exact first and second derivatives are 1) as the
# grid spacing is refined, and compare against reference slopes p=1 and p=2.
from matplotlib import pyplot
%matplotlib notebook
# Ten grid spacings logarithmically spaced between 1e-5 and 1.
dxs = numpy.logspace(-5, 0, 10)
bd_errors = numpy.zeros_like(dxs)
fd_errors = numpy.zeros_like(dxs)
cd1_errors = numpy.zeros_like(dxs)
cd2_errors = numpy.zeros_like(dxs)
# Absolute error of each scheme at every grid spacing.
for i, dx in enumerate(dxs):
    bd_errors[i] = abs(backward_differencing(numpy.exp, 0.0, dx) - 1.0)
    fd_errors[i] = abs(forward_differencing(numpy.exp, 0.0, dx) - 1.0)
    cd1, cd2 = central_differencing(numpy.exp, 0.0, dx)
    cd1_errors[i] = abs(cd1 - 1.0)
    cd2_errors[i] = abs(cd2 - 1.0)
# Log-log plot: a scheme of order p appears as a straight line of slope p.
pyplot.figure()
pyplot.loglog(dxs, bd_errors, 'kx', label='Backwards')
pyplot.loglog(dxs, fd_errors, 'b+', label='Forwards')
pyplot.loglog(dxs, cd1_errors, 'go', label='Central (1st)')
pyplot.loglog(dxs, cd2_errors, 'r^', label='Central (2nd)')
# Reference lines anchored to the first data point of each family.
pyplot.loglog(dxs, dxs*(bd_errors[0]/dxs[0]), 'k-', label=r"$p=1$")
pyplot.loglog(dxs, dxs**2*(cd1_errors[0]/dxs[0]**2), 'k--', label=r"$p=2$")
pyplot.xlabel(r"$\Delta x$")
pyplot.ylabel("Error")
pyplot.legend(loc="lower right")
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Numerical Methods
Step4: The errors here are significant. What matters is how fast the errors reduce as we change the grid spacing. Try changing from $\Delta x = 1$ to $\Delta x = 0.1$
Step5: We see roughly the expected scaling, with forward and backward differencing errors reducing by roughly $10$, and central differencing errors reducing by roughly $10^2$.
|
10,386
|
<ASSISTANT_TASK:>
Python Code:
# The same list built three ways: an explicit accumulator loop, then the
# equivalent (and more idiomatic) list comprehensions.
L = []
for value in range(12):
    L.append(value ** 2)
L

L = [value ** 2 for value in range(12)]
L

# The output expression can be anything, e.g. square roots.
L = [value ** 0.5 for value in (11, 22, 33)]
L
L = []
for i in range(2):
for j in range(3):
if i == 0:
L.append((i, j))
L
[val for val in range(20) if val % 3 == 0 and val != 0]
[val if val % 2 else -val
for val in range(20) if val % 3]
d = {k: v for k, v in zip('abc', range(3))}
d
{k: v for k, v in d.items() if k in ['a', 'c']}
def catch_all(*args, **kwargs):
    """Demonstrate *args / **kwargs by printing whatever was passed in."""
    print("Positional (required) arguments:\t", args)
    print("Keyword (optional) arguments:\t", kwargs)


catch_all(1, 2, 3, a=4, b=5)
catch_all('a', keyword=2)

# Unpacking at the call site: * expands a sequence into positional
# arguments, ** expands a mapping into keyword arguments.
inputs = 1, 2, 3
keywords = dict(pi=3.14, e=2.71)
catch_all(*inputs, **keywords)


def fun(a=1, pi=3, e=2):
    """Print its three (defaulted) arguments."""
    print(a, pi, e)


fun(**keywords)
class MyClass:
    """Turn every keyword argument into an instance attribute."""

    def __init__(self, **kwargs):
        # vars(self) is the instance __dict__, so each keyword argument
        # becomes an attribute of the new object.
        vars(self).update(kwargs)


data = {'var': 123}
a = MyClass(blahblah=456)
a.blahblah
from path import Path
from tempfile import mkdtemp
import uuid
class MockModel(object):
    """Mock model that generates files of random bytes in a working directory.

    Note: the docstring quotes were stripped in the original, leaving bare
    text (syntax errors); restored here.
    """

    def __init__(self, workdir=None):
        """Initialise the model.

        Kwargs:
            workdir: working directory path;
                defaults to a fresh temporary directory
        """
        if workdir is None:
            self.workdir = Path(mkdtemp())
        else:
            self.workdir = Path(workdir)  # .abspath()
        # Create the directory (and any missing parents) if needed.
        self.workdir.makedirs_p()

    def run(self, n, extensions=None):
        """Run the model: write *n* 1 KiB files of random bytes per extension.

        Args:
            n: int, number of random files to generate per extension
        Kwargs:
            extensions: iterable of file extensions; defaults to ['tmp']
                (mutable-default argument replaced by None sentinel)
        """
        import os  # local import: os is not imported at class-definition time

        assert isinstance(n, int), 'n must be an integer!'
        if extensions is None:
            extensions = ['tmp']
        self.files = []
        for ext in extensions:
            for _ in range(n):
                # BUG FIX: the original format string never interpolated the
                # uuid, so every file of a given extension had the same name
                # and overwrote the previous one; use a fresh uuid per file.
                f = self.workdir / 'junk_{stem}.{ext}'.format(stem=uuid.uuid4(),
                                                              ext=ext)
                with f.open('wb') as fout:
                    # os.urandom is portable and leaves no file handle open
                    # (the original read /dev/urandom without closing it).
                    fout.write(os.urandom(1024))
                self.files.append(f)

    def show_results(self, pattern='*'):
        """Return the sorted list of files in workdir matching *pattern*."""
        return [x for x in sorted(self.workdir.glob(pattern))
                if x.isfile()]

    def clean_all(self, pattern):
        """Delete every entry in workdir matching the glob *pattern*."""
        for f in self.workdir.glob(pattern):
            f.remove()

    def purge(self):
        """Remove the working directory and all of its contents."""
        self.workdir.rmtree_p()
# Create a model with an explicit working directory (created on disk).
m = MockModel('such_folder_wow')
# Empty before any run.
m.show_results()
# Generate 2 files for each of the three extensions.
m.run(2, extensions=['temp', 'pyc', 'out'])
m.show_results()
# Delete only the .pyc files.
m.clean_all('*.pyc')
def clean_dir(dirname, pattern):
    """Delete every file in *dirname* whose name matches the glob *pattern*.

    Uses the standard-library pathlib instead of the third-party path.py
    package; behavior is unchanged.
    """
    from pathlib import Path  # stdlib replacement for path.py's Path
    for match in Path(dirname).glob(pattern):
        match.unlink()
import os
from glob import glob
def clean_dir(dirname, pattern):
    """Delete every file in *dirname* whose name matches the glob *pattern*.

    Pure os + glob implementation (no third-party dependencies).
    """
    search = '{absdir}/{pattern}'.format(absdir=os.path.abspath(dirname),
                                         pattern=pattern)
    for match in glob(search):
        os.remove(match)
# Remove any shared objects (none are expected here, so this is a no-op)...
clean_dir('such_folder_wow', '*.so')
# ...then delete the whole working directory of the model created above.
m.purge()
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap

fig, ax = plt.subplots()
# North Polar Stereographic projection, limited to latitudes above 60N and
# clipped to a round boundary.
# FIX: `round` expects a boolean; the original passed the string 'True',
# which only worked because any non-empty string is truthy.
mymap = Basemap(ax=ax, projection='npstere', boundinglat=60,
                round=True, lon_0=0, lat_0=90)
mymap.drawcoastlines()
# Meridians every 60 degrees, parallels every 5 degrees.
mymap.drawmeridians(np.arange(-180, 181, 60), latmax=90);
mymap.drawparallels(np.arange(-90, 91, 5));
# The same polar view using cartopy instead of basemap.
import cartopy.crs as ccrs
%matplotlib inline
plt.rcParams['figure.figsize'] = (8, 8)
fig = plt.figure()
# Stereographic projection centred on the North Pole.
ax = fig.add_subplot(111, projection=ccrs.Stereographic(central_latitude=90))
# Extent given in geographic (lon/lat) coordinates; note the map boundary
# stays rectangular with this projection/extent combination.
ax.set_extent([-180, 180, 60, 90], ccrs.PlateCarree())
ax.gridlines()
ax.coastlines()
# Same polar view, but with a circular map boundary via a matplotlib Path.
import matplotlib.path as mpath
fig = plt.figure()
ax = fig.add_subplot(111, projection=ccrs.NorthPolarStereo())
ax.coastlines()
ax.set_extent([-180, 180, 60, 90], ccrs.PlateCarree())
ax.gridlines()
# Compute a circle in axes coordinates, which we can use as a boundary
# for the map. We can pan/zoom as much as we like - the boundary will be
# permanently circular.
theta = np.linspace(0, 2*np.pi, 100)
center, radius = [0.5, 0.5], 0.5
# Unit-circle vertices scaled and shifted into the axes' [0, 1] square.
verts = np.vstack([np.sin(theta), np.cos(theta)]).T
circle = mpath.Path(verts * radius + center)
ax.set_boundary(circle, transform=ax.transAxes)
# NOTE(review): HTML and html are not defined anywhere in this chunk —
# this looks like a leftover from an IPython.display cell; verify upstream.
HTML(html)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compare it with this
Step2: As with many Python statements, you can almost read-off the meaning of this statement in plain English
Step3: Conditionals
Step4: Dictionary comprehension
Step5: Keyword arguments in functions (kwargs)
Step6: Cleaning up after model runs
Step10: Aside
Step11: Initialise the object m
Step12: No results yet
Step13: Run the model
Step14: Now, delete all *.pyc files in the working directory
Step15: Take-home code snippet (using path.py)
Step16: Or using os module
Step17: Remove the directory
Step18: Polar ~~stereoscopic~~ stereographic maps
Step19: Now, let's try the same North Polar Stereographic projection plot with cartopy.
Step20: References
|
10,387
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import math, random # 計算用、乱数の生成用ライブラリ
import matplotlib.pyplot as plt # 描画用ライブラリ
class Landmarks:
    """Fixed map features (drawn as stars) that the robot can observe."""

    def __init__(self, array):
        # array = [[x of star 1, y of star 1], [x of star 2, y of star 2], ...]
        self.positions = array

    def draw(self):
        """Scatter every landmark position as a large orange star."""
        xs = [pos[0] for pos in self.positions]
        ys = [pos[1] for pos in self.positions]
        plt.scatter(xs, ys, s=300, marker="*", label="landmarks", color="orange")
def Movement(pos, fw, rot):
    """Motion model: advance *fw* and turn *rot* from pose *pos*, with noise.

    Returns the new pose as a list [x, y, theta].
    """
    # Noisy forward / rotation commands (20% standard deviation each).
    noisy_fw = random.gauss(fw, fw * 0.2)
    noisy_rot = random.gauss(rot, rot * 0.2)
    # Heading error with a 3 [deg] standard deviation.
    heading_noise = random.gauss(0.0, math.pi / 180.0 * 3.0)

    # Unpack the pose before the move.
    px, py, pt = pos

    # Pose after the move (heading noise also perturbs the travel direction).
    x = px + noisy_fw * math.cos(pt + heading_noise)
    y = py + noisy_fw * math.sin(pt + heading_noise)
    t = pt + heading_noise + noisy_rot

    return [x, y, t]
def Observation(pos, landmark):
    """Observation model: noisy range/bearing to each landmark from *pos*.

    Returns a list with one entry per landmark:
    [distance, direction, sigma_d, sigma_f, result], where *result* is
    False when the landmark lies outside the sensor's range or field of
    view (the noisy measurement is still reported in that case).
    """
    # BUG FIX: the original re-created the output list (z) inside the
    # per-landmark loop and left obss unused, so only a single landmark's
    # observation was ever returned; accumulate every landmark here instead.
    obss = []

    # Sensor limits:
    #   range   0.1 .. 1.0
    #   bearing -90 .. 90 [deg]
    sensor_max_range = 1.0
    sensor_min_range = 0.1
    sensor_max_angle = math.pi / 2
    sensor_min_angle = -math.pi / 2

    # Pose of the robot (or particle) doing the observing.
    rx, ry, rt = pos

    for lpos in landmark.positions:
        true_lx, true_ly = lpos
        # result stays True only if the landmark is within sensor limits.
        result = True

        # True distance from the observer to the landmark.
        distance = math.sqrt((rx - true_lx) ** 2 + (ry - true_ly) ** 2)
        if distance > sensor_max_range or distance < sensor_min_range:
            result = False

        # True bearing of the landmark, normalised into (-pi, pi].
        direction = math.atan2(true_ly - ry, true_lx - rx) - rt
        if direction > math.pi: direction -= 2 * math.pi
        if direction < - math.pi: direction += 2 * math.pi
        if direction > sensor_max_angle or direction < sensor_min_angle:
            result = False

        # Noise magnitudes; these also serve as the standard deviations of
        # the normal densities used later for likelihood evaluation.
        sigma_d = distance * 0.2       # 20% of range
        sigma_f = math.pi * 3 / 180    # 3 [deg]

        # Mix the noise into the true measurement.
        d = random.gauss(distance, sigma_d)
        f = random.gauss(direction, sigma_f)

        obss.append([d, f, sigma_d, sigma_f, result])

    return obss
class Robot:
def __init__(self, x, y, rad):
random.seed()
# ステップごとにロボットの姿勢の真値が入った配列
self.actual_poses = [[x,y,rad]]
def move(self,fw,rot):
# ロボットの位置を記録する(軌跡を残すために配列に入れてる)
self.actual_poses.append(Movement(self.actual_poses[-1], fw, rot))
def observation(self, landmarks):
# 現在地から見た観測データの保存
self.z = Observation(self.actual_poses[-1], landmarks)
# 矢印の描画に必要な位置と方向を計算して描画
def draw(self, sp):
xs = [e[0] for e in self.actual_poses]
ys = [e[1] for e in self.actual_poses]
vxs = [math.cos(e[2]) for e in self.actual_poses]
vys = [math.sin(e[2]) for e in self.actual_poses]
plt.quiver(xs,ys,vxs,vys,color="red",scale=15,angles='xy',scale_units='xy',alpha = 0.3)
def draw(i):
# グラフの設定
fig = plt.figure(i, figsize=(8,8))
sp = fig.add_subplot(111,aspect='equal')
sp.set_xlim(-0.5,2.0)
sp.set_ylim(-0.5,0.5)
# ロボット、ランドマークの描画
for robot in robots:
robot.draw(sp)
if i:
for robot in robots:
for obs in robot.z:
d = obs[0]
f = obs[1]
x = d * math.cos(f)
y = d * math.sin(f)
plt.plot(x, y, "o")
actual_landmarks.draw()
plt.legend()
actual_landmarks = Landmarks([[1.0,0.0]])
robots = []
for i in range(100):
robots.append(Robot(0,0,0))
for robot in robots:
robot.move(1.0, 0)
draw(0)
actual_landmarks = Landmarks([[1.0,0.0]])
robots = []
for i in range(100):
robots.append(Robot(0,0,0))
for robot in robots:
robot.observation(actual_landmarks)
draw(1)
rd = 1
rf = 0
sigma_rd = 1.0 * 0.2
sigma_rf = math.pi * 3 / 180
pd = np.arange(-3, 3, 0.01)
pf = np.arange(-3, 3, 0.01)
d = np.exp(-(rd - pd) ** 2 / (2 * (sigma_rd ** 2))) / (sigma_rd * np.sqrt(2 * np.pi))
f = np.exp(-(rf - pf) ** 2 / (2 * (sigma_rf ** 2))) / (sigma_rf * np.sqrt(2 * np.pi))
fig = plt.figure(figsize=(10,4))
sp = fig.add_subplot(111)
sp.set_xlim(-0.5,2.0)
sp.set_ylim(-0.5,8)
plt.plot(pd, d, color = "orange",label="distance")
plt.plot(pf, f, color = "blue",label="direction")
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 移動モデル
Step2: 観測モデル
Step3: 尤度計算
|
10,388
|
<ASSISTANT_TASK:>
Python Code:
# check firewall
!rm index.html*
!wget www.google.com
import uuid
from google.cloud import dialogflow
# session format: 'projects/*/locations/*/agent/environments/*/users/*/sessions/*'.
def get_session(project_id, session_id, env=None):
Using the same `session_id` between requests allows continuation
of the conversation.
:return: session is a str
if env is None:
session_client = dialogflow.SessionsClient()
session = session_client.session_path(project_id, session_id)
return session
else:
assert isinstance(env, str)
return f"projects/{project_id}/agent/environments/{env}/sessions/{session_id}"
# [START dialogflow_detect_intent_text]
def detect_intent_texts(project_id, session_id, text, language_code, env=None):
Returns the result of detect intent with texts as inputs.
Using the same `session_id` between requests allows continuation
of the conversation.
session= get_session(project_id, session_id, env=env)
print("Session path: {}\n".format(session))
text_input = dialogflow.TextInput(text=text, language_code=language_code)
query_input = dialogflow.QueryInput(text=text_input)
response = session_client.detect_intent(
request={"session": session, "query_input": query_input}
)
return response
# set credentials, this is a must
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] ="/mnt/d/code/sabala/mega-sabala-9ibe-940e7527ac9b.json"
from google.cloud import dialogflow
project_id = "sabala-348110"
session_id = str(uuid.uuid4())
# texts = ["Me toque uma música", "toca nos 80", "Tocar música clássica", "parar música"][-1:]
language_code = "en-US"
session = get_session(project_id, session_id, env='new')
session
project_id = "mega-sabala-9ibe"
session_id = str(uuid.uuid4())
texts = ["how's the weather today", "Você pode me dizer a maneira mais fácil de ganhar dinheiro?"]
texts = ["parar música"]
language_code = "pt-BR"
response = detect_intent_texts(
project_id, session_id, texts, language_code
)
response.output_audio = "None"
print("=" * 20)
print("Query text: {}".format(response.query_result.query_text))
print(
"Detected intent: {} (confidence: {})\n".format(
response.query_result.intent.display_name,
response.query_result.intent_detection_confidence,
)
)
print("Fulfillment text: {}\n".format(response.query_result.fulfillment_text))
print("Fulfillment Full: \n{}\n".format(response))
print(type(response))
project_id = "sabala-348110"
session_id = str(uuid.uuid4())
texts = ["Me toque uma música", "toca nos 80", "Tocar música clássica", "parar música"]
language_code = "en-US"
response = detect_intent_texts(
project_id, session_id, texts[-1], language_code
)
print( [(_, type(_)) for _ in dir(response) if not _.startswith("_")] )
response.output_audio = "None"
print(response.query_result.intent)
response = detect_intent_texts(
project_id, session_id, texts[-1], language_code, env='new'
)
# TODO: change the default Entry Point text to handleWebhook
def handleWebhook(request):
req = request.get_json()
responseText = ""
intent = req["queryResult"]["intent"]["displayName"]
if intent == "Default Welcome Intent":
responseText = "Hello from a GCF Webhook"
elif intent == "get-agent-name":
responseText = "My name is Flowhook"
else:
responseText = f"There are no fulfillment responses defined for Intent {intent}"
# You can also use the google.cloud.dialogflowcx_v3.types.WebhookRequest protos instead of manually writing the json object
res = {"fulfillmentMessages": [{"text": {"text": [responseText]}}]}
return res
handleWebhook(response)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Call dialogflow with python api
Step3: examples of making session url
Step4: visit Mega Agent and print
Step5: visit media/Music Agent
Step6: visit active version in a specified environment
Step7: Webhook handle
|
10,389
|
<ASSISTANT_TASK:>
Python Code::
def possible(x,S,N):
minSum = (x * (x + 1))//2
maxSum = (x * ((2 * N) - x + 1))//2
if(S < minSum or S > maxSum):
return False
return True
def findPermutation(N ,L ,R ,S ):
x = R - L + 1
if (not possible( x , S , N)) :
print(" - 1")
return
else :
v = []
for i in range(N , 0 , - 1):
if(( S - i)>= 0 and possible(x - 1 , S - i , i - 1)) :
S = S - i
x -= 1
v . append(i)
if(S == 0):
break
if(S != 0):
print(- 1)
return
v1 = []
for i in range(1 , N + 1):
it = i in v
if(not it):
v1 . append(i)
j = 0
f = 0
for i in range(1 , L):
print(v1[j], end = " ▁ ")
j += 1
for i in range(L , R + 1):
print(v[f], end = " ▁ ")
f += 1
for i in range(R + 1 , N + 1):
print(v1[j], end = " ▁ ")
j += 1
return
if __name__ == " _ _ main _ _ " :
N = 6
L = 3
R = 5
S = 8
findPermutation(N , L , R , S)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
10,390
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import time
sys.path.append(os.path.abspath(os.path.join(os.path.pardir, os.path.sep.join(['..', 'codes']), 'client')))
sys.path.append(os.path.abspath(os.path.join(os.path.pardir, os.path.sep.join(['..', 'codes']), 'node')))
sys.path.append(os.path.abspath(os.path.join(os.path.pardir, os.path.sep.join(['..', 'codes']), 'shared')))
sys.path.append(os.path.abspath(os.path.join(os.path.pardir, os.path.sep.join(['..', 'codes']), 'micropython')))
import client
from collections import OrderedDict
import pandas as pd
from pandas import DataFrame
from time import sleep
REFRACTORY_PERIOD = 0.1 # 0.1 seconds
neurons = ['n_Alpha', 'n_Beta', 'n_Lambda']
# neurons = ['n_Alpha']
neurons
the_client = client.Client()
the_client.start()
while not the_client.status['Is connected']:
time.sleep(1)
print('Node not ready yet.')
# # Ask Hub for a list of connected nodes
# def list_nodes():
# the_client.node.worker.roll_call()
# time.sleep(2)
# remote_nodes = sorted(the_client.node.worker.contacts.keys())
# print('\n[____________ Connected nodes ____________]\n')
# print('\nConnected nodes:\n{}\n'.format(remote_nodes))
# return remote_nodes
def reset_node(node):
message = {'message_type': 'exec',
'to_exec': 'import machine;machine.reset()'}
the_client.request(node, message)
def fire(node):
message = {'message_type': 'function',
'function': 'fire'}
the_client.request(node, message)
def addConnection(node, neuron):
message = {'message_type': 'function',
'function': 'addConnection',
'kwargs': {'neuron_id': neuron}}
the_client.request(node, message)
def set_connections(node, connections):
message = {'message_type': 'function',
'function': 'setConnections',
'kwargs': {'connections': connections}}
the_client.request(node, message)
def get_connections(node):
message = {'message_type': 'function',
'function': 'getConnections',
'need_result': True}
_, result = the_client.request(node, message)
return result.get()
def setWeight(node, neuron, weight):
message = {'message_type': 'function',
'function': 'setWeight',
'kwargs': {'neuron_id': neuron,
'weight': weight,}}
the_client.request(node, message)
def setThreshold(node, threshold):
message = {'message_type': 'function',
'function': 'setThreshold',
'kwargs': {'threshold': threshold}}
the_client.request(node, message)
def getConfig(node):
message = {'message_type': 'function',
'function': 'getConfig',
'need_result': True}
_, result = the_client.request(node, message)
return result.get()
def getLog(node):
message = {'message_type': 'function',
'function': 'getLog',
'need_result': True}
_, result = the_client.request(node, message)
return result.get()
def emptyLog(node):
message = {'message_type': 'function',
'function': 'emptyLog'}
the_client.request(node, message)
def emptyLogs():
for neuron in neurons:
emptyLog(neuron)
def mergeLogs():
logs = []
for neuron in neurons:
if neuron != the_client.node.worker.name: # exclude client self
currentLog = getLog(neuron)
if currentLog:
logs += currentLog
df = DataFrame(list(logs), columns = ['time', 'neuron', 'message'])
df.set_index('time', inplace = True)
df.sort_index(inplace = True)
return df
def printConfig(neuron):
print('{0:_^78}\n {1}\n'.format(neuron + " config:", getConfig(neuron)))
# reset_node('Hub');
messages = {}
messages['blink_led'] = {'message_type': 'command',
'command': 'blink led',
'kwargs': {'times': 3, 'on_seconds': 0.1, 'off_seconds': 0.1}}
the_client.request('Hub', messages['blink_led']);
addConnection('n_Alpha', 'n_Lambda');
addConnection('n_Beta', 'n_Lambda');
setWeight('n_Lambda', 'n_Alpha', 1);
setWeight('n_Lambda', 'n_Beta', 1);
setThreshold('n_Lambda', 2.8); # input enough to trigger Lambda
fire('n_Alpha');
fire('n_Beta');
setThreshold('n_Lambda', 2.8); # input not enough to trigger Lambda
fire('n_Alpha');
fire('n_Beta');
setThreshold('n_Lambda', 1.8); # input not enough to trigger Lambda
fire('n_Alpha');
setThreshold('n_Lambda', 1.8); # input enough to trigger Lambda
fire('n_Alpha');
fire('n_Beta');
# setThreshold('n_Lambda', 1.8);
# emptyLogs()
# sleep(REFRACTORY_PERIOD)
# fire('n_Alpha')
# fire('n_Beta')
# sleep(2)
# mergeLogs()
# for neuron in reversed(neurons): printConfig(neuron)
# Stopping
the_client.stop()
the_client = None
print ('\n[________________ Demo stopped ________________]\n')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List of neurons
Step2: Start client
Step3: Utility functions
Step4: Reset neurons
Step5: Probe neurons by blinking LEDs
Step6: Setup connections / weights / thresholds
Step7: Stop the demo
|
10,391
|
<ASSISTANT_TASK:>
Python Code:
import requests
import pickle
r = requests.get('http://drgmk.com/sdb/seds/masters/'
'sdb-v2-132436.10-513016.1/public/sdb-v2-132436.10-513016.1-mnest/phoenix_m+modbb_disk_r_.json')
d = r.json()
for k in d.keys():
print(k, type(d[k]))
s = requests.get('http://drgmk.com/sdb/seds/masters/'
'sdb-v2-132436.10-513016.1/public/sdb-v2-132436.10-513016.1-mnest/phoenix_m+modbb_disk_r_.pkl')
r = pickle.loads(s.content)
# print the model component fluxes for the MIRI bands
print(f'filter: {r.model_comps}, total')
for i,f in enumerate(r.all_filters):
if 'NIRCAM' in f:
print(f, r.all_comp_phot[:,i], r.all_phot[i])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: json output
Step2: The information contained in the json is largely related to the observational data, e.g. photometry and models in the observed bands.
|
10,392
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function, division
import numpy
import scipy.stats
import matplotlib.pyplot as pyplot
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# seed the random number generator so we all get the same results
numpy.random.seed(17)
# some nice colors from http://colorbrewer2.org/
COLOR1 = '#7fc97f'
COLOR2 = '#beaed4'
COLOR3 = '#fdc086'
COLOR4 = '#ffff99'
COLOR5 = '#386cb0'
mu1, sig1 = 178, 7.7
male_height = scipy.stats.norm(mu1, sig1)
mu2, sig2 = 163, 7.3
female_height = scipy.stats.norm(mu2, sig2)
def eval_pdf(rv, num=4):
mean, std = rv.mean(), rv.std()
xs = numpy.linspace(mean - num*std, mean + num*std, 100)
ys = rv.pdf(xs)
return xs, ys
xs, ys = eval_pdf(male_height)
pyplot.plot(xs, ys, label='male', linewidth=4, color=COLOR2)
xs, ys = eval_pdf(female_height)
pyplot.plot(xs, ys, label='female', linewidth=4, color=COLOR3)
pyplot.xlabel('height (cm)')
None
male_sample = male_height.rvs(1000)
female_sample = female_height.rvs(1000)
mean1, std1 = male_sample.mean(), male_sample.std()
mean1, std1
mean2, std2 = female_sample.mean(), female_sample.std()
mean2, std2
difference_in_means = male_sample.mean() - female_sample.mean()
difference_in_means # in cm
# Solution goes here
relative_difference = difference_in_means / male_sample.mean()
print(relative_difference * 100) # percent
# A problem with relative differences is that you have to choose which mean to express them relative to.
relative_difference = difference_in_means / female_sample.mean()
print(relative_difference * 100) # percent
simple_thresh = (mean1 + mean2) / 2
simple_thresh
thresh = (std1 * mean2 + std2 * mean1) / (std1 + std2)
thresh
male_below_thresh = sum(male_sample < thresh)
male_below_thresh
female_above_thresh = sum(female_sample > thresh)
female_above_thresh
overlap = male_below_thresh / len(male_sample) + female_above_thresh / len(female_sample)
overlap
misclassification_rate = overlap / 2
misclassification_rate
# Solution goes here
sum(x > y for x, y in zip(male_sample, female_sample)) / len(male_sample)
def CohenEffectSize(group1, group2):
Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / numpy.sqrt(pooled_var)
return d
CohenEffectSize(male_sample, female_sample)
def overlap_superiority(control, treatment, n=1000):
Estimates overlap and superiority based on a sample.
control: scipy.stats rv object
treatment: scipy.stats rv object
n: sample size
control_sample = control.rvs(n)
treatment_sample = treatment.rvs(n)
thresh = (control.mean() + treatment.mean()) / 2
control_above = sum(control_sample > thresh)
treatment_below = sum(treatment_sample < thresh)
overlap = (control_above + treatment_below) / n
superiority = sum(x > y for x, y in zip(treatment_sample, control_sample)) / n
return overlap, superiority
def plot_pdfs(cohen_d=2):
Plot PDFs for distributions that differ by some number of stds.
cohen_d: number of standard deviations between the means
control = scipy.stats.norm(0, 1)
treatment = scipy.stats.norm(cohen_d, 1)
xs, ys = eval_pdf(control)
pyplot.fill_between(xs, ys, label='control', color=COLOR3, alpha=0.7)
xs, ys = eval_pdf(treatment)
pyplot.fill_between(xs, ys, label='treatment', color=COLOR2, alpha=0.7)
o, s = overlap_superiority(control, treatment)
print('overlap', o)
print('superiority', s)
plot_pdfs(2)
slider = widgets.FloatSlider(min=0, max=4, value=2)
interact(plot_pdfs, cohen_d=slider)
None
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part One
Step2: The following function evaluates the normal (Gaussian) probability density function (PDF) within 4 standard deviations of the mean. It takes and rv object and returns a pair of NumPy arrays.
Step3: Here's what the two distributions look like.
Step4: Let's assume for now that those are the true distributions for the population.
Step5: Both samples are NumPy arrays. Now we can compute sample statistics like the mean and standard deviation.
Step6: The sample mean is close to the population mean, but not exact, as expected.
Step7: And the results are similar for the female sample.
Step8: On average, men are 14--15 centimeters taller. For some applications, that would be a good way to describe the difference, but there are a few problems
Step9: STOP HERE
Step10: A better, but slightly more complicated threshold is the place where the PDFs cross.
Step11: In this example, there's not much difference between the two thresholds.
Step12: And how many women are above it
Step13: The "overlap" is the total area under the curves that ends up on the wrong side of the threshold.
Step14: Or in more practical terms, you might report the fraction of people who would be misclassified if you tried to use height to guess sex
Step15: Another way to quantify the difference between distributions is what's called "probability of superiority", which is a problematic term, but in this context it's the probability that a randomly-chosen man is taller than a randomly-chosen woman.
Step17: Overlap (or misclassification rate) and "probability of superiority" have two good properties
Step18: Computing the denominator is a little complicated; in fact, people have proposed several ways to do it. This implementation uses the "pooled standard deviation", which is a weighted average of the standard deviations of the two groups.
Step20: Most people don't have a good sense of how big $d=1.9$ is, so let's make a visualization to get calibrated.
Step22: Here's the function that takes Cohen's $d$, plots normal distributions with the given effect size, and prints their overlap and superiority.
Step23: Here's an example that demonstrates the function
Step24: And an interactive widget you can use to visualize what different values of $d$ mean
|
10,393
|
<ASSISTANT_TASK:>
Python Code:
lc = np.loadtxt('data/lc.V.data')
rv1 = np.loadtxt('data/rv1.data')
rv2 = np.loadtxt('data/rv2.data')
b = phoebe.default_binary()
b.add_dataset('lc', times = lc[:,0], fluxes=lc[:,1], sigmas=lc[:,2], passband='Johnson:V')
b.add_dataset('rv', passband='Johnson:V')
b['times@rv@primary'], b['rvs@rv@primary'], b['sigmas@rv@primary'] = rv1[:,0], rv1[:,1], rv1[:,2]
b['times@rv@secondary'], b['rvs@rv@secondary'], b['sigmas@rv@secondary'] = rv2[:,0], rv2[:,1], rv2[:,2]
b.plot(x='times', show=True)
b.set_value('pblum_mode', 'dataset-scaled')
b.set_value_all('distortion_method', 'sphere')
b.add_constraint('requivsumfrac')
b.add_constraint('requivratio')
b.add_constraint('teffratio')
b.flip_constraint('requivratio', solve_for='requiv@secondary')
b.flip_constraint('requivsumfrac', solve_for='requiv@primary')
b.flip_constraint('teffratio', solve_for='teff@secondary')
b.flip_constraint('esinw', solve_for='ecc')
b.flip_constraint('ecosw', solve_for='per0')
b.add_solver('estimator.lc_periodogram', solver='lcperiod_bls',
algorithm='bls', minimum_n_cycles=2, sample_mode='manual',
sample_periods = np.linspace(2.,2.5,1000),
overwrite=True)
b.run_solver('lcperiod_bls', solution='lcperiod_bls_sol', overwrite=True)
print(b['lcperiod_bls_sol'])
b.add_solver('estimator.lc_periodogram', solver='lcperiod_ls',
algorithm='ls',sample_mode='manual',
sample_periods = np.linspace(2.,2.5,1000),
overwrite=True)
b.run_solver('lcperiod_ls', solution='lcperiod_ls_sol', overwrite=True)
print(b['lcperiod_ls_sol'])
b.add_solver('estimator.rv_periodogram', solver='rvperiod', overwrite=True)
b.run_solver('rvperiod', solution='rvperiod_sol',
sample_mode='manual', sample_periods=np.linspace(2.,2.5,1000),
overwrite=True)
print(b['rvperiod_sol'])
np.mean([2.3433433433433435, 2.381881881881882, 2.340840840840841])
b.adopt_solution('lcperiod_bls_sol')
# b['period@binary'] = 2.346
b.plot(x='phase', show=True)
lc_bls_periodogram_results = get_current_values(b, ['period@binary',])
b.adopt_solution('lcperiod_ls_sol')
# b['period@binary'] = 2.346
b.plot(x='phase', show=True)
lc_ls_periodogram_results = get_current_values(b, ['period@binary',])
b.adopt_solution('rvperiod_sol')
# b['period@binary'] = 2.346
b.plot(x='phase', show=True)
rv_periodogram_results = get_current_values(b, ['period@binary',])
b.adopt_solution('lcperiod_bls_sol')
b.add_solver('estimator.ebai', solver='lc_est_ebai_mlp', ebai_method='mlp', phase_bin = False, overwrite=True)
b.run_solver('lc_est_ebai_mlp', solution='lc_soln_ebai_mlp', overwrite=True)
b.adopt_solution('lc_soln_ebai_mlp')
b.run_compute(model='ebai_mlp_model')
b.plot(x='phase',show=True)
ebai_mlp_results = get_current_values(b, ['incl@binary', 'teffratio','requivsumfrac','esinw','ecosw'])
b.add_solver('estimator.ebai', solver='lc_est_ebai_knn', ebai_method='knn', phase_bin = False, overwrite=True)
b.run_solver('lc_est_ebai_knn', solution='lc_soln_ebai_knn', overwrite=True)
b.adopt_solution('lc_soln_ebai_knn')
b.run_compute(model='ebai_knn_model', overwrite=True)
b.plot(x='phase',show=True)
ebai_knn_results = get_current_values(b, ['incl@binary', 'teffratio','requivsumfrac','esinw','ecosw'])
b.add_solver('estimator.lc_geometry', solver='lc_est_lcgeom', phase_bin = False)
b.run_solver('lc_est_lcgeom', solution='lc_soln_lcgeom')
b.flip_constraint('ecc', solve_for='esinw')
b.flip_constraint('per0', solve_for='ecosw')
b.adopt_solution('lc_soln_lcgeom')
b.run_compute(model='lc_geometry_model')
b.plot(x='phase', legend=True, save='testcase_estimators.png', show=True)
b.flip_constraint('esinw', solve_for='ecc')
b.flip_constraint('ecosw', solve_for='per0')
lc_geometry_results = get_current_values(b, ['incl@binary', 'teffratio','requivsumfrac','esinw','ecosw'])
b.add_solver('estimator.rv_geometry', solver='rvgeom')
b.run_solver('rvgeom', solution='rvgeom_sol')
b['adopt_parameters@rvgeom_sol'] = ['q', 'asini@binary', 'vgamma']
b.flip_constraint('asini@binary', solve_for='sma@binary')
# b.flip_constraint('ecc', solve_for='esinw')
# b.flip_constraint('per0', solve_for='ecosw')
b.adopt_solution('rvgeom_sol')
# b['period@binary'] = 2.345678901
b.run_compute(model='rvgeom_model')
b.plot(x='phase', model='rvgeom_model', legend=True, show=True)
rv_geometry_results = get_current_values(b, ['q', 'asini@binary','vgamma'])
b.save('bundles/after_estimators.bundle')
times = b.get_value('times', context='dataset', dataset='lc01')
phases = b.to_phase(times)
fluxes_true = b.get_value('fluxes', context='dataset', dataset='lc01')
sigmas_true = b.get_value('sigmas', context='dataset', dataset='lc01')
times_rv = b.get_value('times', context='dataset', component='primary', dataset='rv01')
phases_rv = b.to_phase(times_rv)
rvs1 = b.get_value('rvs', context='dataset', component='primary', dataset='rv01')
rvs2 = b.get_value('rvs', context='dataset', component='secondary', dataset='rv01')
sigmas1 = b.get_value('sigmas', context='dataset', component='primary', dataset='rv01')
sigmas2 = b.get_value('sigmas', context='dataset', component='secondary', dataset='rv01')
lc_ebai_mlp = get_model(b, model='ebai_mlp_model', dataset='lc01', phase_order=True)
lc_ebai_knn = get_model(b, model='ebai_knn_model', dataset='lc01', phase_order=True)
lc_geom = get_model(b, model='lc_geometry_model', dataset='lc01', phase_order=True)
rv_geom1, rv_geom2 = get_model(b, model='rvgeom_model', dataset='rv01', model_type='rv', phase_order=True)
fig, ((ax1, ax1b, ax2, ax3), (ax4, ax4b, ax5, ax6)) = plt.subplots(nrows = 2, ncols = 4, figsize=(7.25,2),
gridspec_kw={'height_ratios': [2, 1]})
fig.subplots_adjust(hspace=0, wspace=0.3)
lc_datapoints = {'fmt': '.', 'ms': 1, 'c': '0.5', 'zorder':0,}
rv1_datapoints = {'fmt': ',', 'c': '0.0', 'zorder':0}
rv2_datapoints = {'fmt': ',', 'c': '0.5', 'zorder':0}
model_kwargs = {'lw': 1, 'zorder': 1}
res_kwargs = {'s': 0.5}
res_rv_kwargs = {'s': 3}
for ax in [ax1, ax1b, ax2]:
ax.errorbar(x=phases, y=fluxes_true, yerr=sigmas_true, rasterized=True, **lc_datapoints)
ax3.errorbar(x=phases_rv, y=rvs1, yerr=sigmas1, rasterized=True, **rv1_datapoints)
ax3.errorbar(x=phases_rv, y=rvs2, yerr=sigmas2, rasterized=True, **rv2_datapoints)
ax1.plot(lc_ebai_mlp[:,1], lc_ebai_mlp[:,2], c=phoebe_c['orange'], **model_kwargs)
ax4.scatter(lc_ebai_mlp[:,1], lc_ebai_mlp[:,3], c=phoebe_c['orange'], **res_kwargs)
ax1b.plot(lc_ebai_knn[:,1], lc_ebai_knn[:,2], c=phoebe_c['blue'], **model_kwargs)
ax4b.scatter(lc_ebai_knn[:,1], lc_ebai_knn[:,3], c=phoebe_c['blue'], **res_kwargs)
ax2.plot(lc_geom[:,1], lc_geom[:,2],c=phoebe_c['green'], **model_kwargs)
ax5.scatter(lc_geom[:,1], lc_geom[:,3], c=phoebe_c['green'], **res_kwargs)
ax3.plot(rv_geom1[:,1], rv_geom1[:,2], c=phoebe_c['blue'], label='primary', **model_kwargs)
ax3.plot(rv_geom2[:,1], rv_geom2[:,2], c=phoebe_c['red'], label='secondary', **model_kwargs)
ax6.scatter(rv_geom1[:,1], rv_geom1[:,3], c=phoebe_c['blue'], **res_rv_kwargs)
ax6.scatter(rv_geom2[:,1], rv_geom2[:,3], c=phoebe_c['red'],**res_rv_kwargs)
# ax3.legend()
for ax in [ax1, ax2, ax3]:
ax.set_xticks([])
# for ax in [ax4, ax5, ax6]:
# ax.set_xlabel('Phase')
# ax1.set_ylabel('Flux [W/m$^2$]')
# ax2.set_ylabel('Flux [W/m$^2$]', labelpad=12)
# ax3.set_ylabel('RV [km/s]')
# ax4.set_ylabel('Residuals [W/m$^2$]')
# ax5.set_ylabel('Residuals [W/m$^2$]')
# ax6.set_ylabel('Residuals [km/s]')
ax1.set_title('EBAI - MLP', pad=14)
ax1b.set_title('EBAI - kNN', pad=14)
ax2.set_title('lc\_geometry', pad=14)
ax3.set_title('rv\_geometry', pad=14)
# fig.tight_layout()
fig.savefig('figs/2_estimators_data.pdf', dpi=300)
truths, twigs, labels = get_truths_labels()
true_vals = {}
for twig, value in zip(twigs, truths):
true_vals[twig] = value
twigs = [
'period@binary',
'incl@binary',
'teffratio',
'requivsumfrac',
'esinw',
'ecosw',
'q',
'asini@binary',
'vgamma',
]
labels = [r'$P$',
r'$i$',
r'$T_{\mathrm{eff},2}/T_{\mathrm{eff},1}$',
r'$r_1+r_2$',
r'$e\sin\omega$',
r'$e\cos\omega$',
r'$q$',
r'$a\sin i$',
r'$v_{\gamma}$'
]
fig, axes = plt.subplots(nrows = 1, ncols = len(labels), figsize=(10,1.5))
fig.subplots_adjust(hspace=0, wspace=0.1)
models = [lc_bls_periodogram_results, lc_ls_periodogram_results, rv_periodogram_results, ebai_mlp_results, ebai_knn_results, lc_geometry_results, rv_geometry_results]
model_labels = [r'lc_periodogram (BLS)', r'lc_periodogram (LS)', r'rv_periodogram', r'EBAI_mlp', r'EBAI_knn', r'lc_geometry', r'rv_geometry']
colors = [phoebe_c['black'], phoebe_c['black'], phoebe_c['black'], phoebe_c['orange'], phoebe_c['blue'], phoebe_c['green'], phoebe_c['purple']]
# import cmasher as cmr
# colors = cmr.take_cmap_colors('cmr.rainforest', len(models), cmap_range=(0.05, 0.85), return_fmt='hex')
print('model, twig, current_solution, previous_lc, previous_rv')
for ax, label, twig in zip(axes, labels, twigs):
ax.set_title(label)
# ax.set_ylabel(ylabel)
# ax.set_yticks([])
ax.margins(0.25)
for i, model in enumerate(models):
ax.axhline(i, linestyle='--', lw=0.5, color='gray')
if twig in model.keys():
# print(model_labels[i], twig, model[twig], b_prev_lc.get_value(twig), b_prev_rv.get_value(twig))
ax.scatter(model[twig], i, c=colors[i], s=50, marker='x', zorder=1)
# ax.scatter(b_prev_lc.get_value(twig), i, marker='o', fc='gray', ec='none')
# ax.scatter(b_prev_rv.get_value(twig), i, marker='o', fc='none', ec='gray')
else:
pass
ax.axvline(x=true_vals[twig], ls=':', lw=1.5, c=phoebe_c['red'], zorder=0)
ax.set_ylim(-0.5, len(models)-1+0.5)
for i,ax in enumerate(axes):
# ax.grid(visible=True, which='major', axis='y', linestyle='--')
if i==0:
ax.set_yticks(np.arange(0,len(model_labels),1),model_labels)
else:
ax.yaxis.set_ticklabels([])
# fig.tight_layout()
fig.savefig('figs/3_estimators_vals.pdf', dpi=300)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will set the pblum mode to dataset-scaled for estimators and optimizers, to avoid having to add pblum to the fitted parameters or adjusting it manually. We will also set distortion_method to 'sphere' to speed up the computation of the light curve.
Step2: Set up and flip some constraints needed for adopting the solutions from the estimators
Step3: Periodograms
Step4: The lc_periodogram with algorithm='bls' seems to find the best period, so we'll keep that one moving forward
Step5: V light curve - lc estimators
Step6: RV - rv_geometry
Step7: Plot results
Step8: parameter values
|
10,394
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install tensorflow==2.1 --user
from google.cloud import bigquery
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import shutil
%%bigquery
SELECT
FORMAT_TIMESTAMP("%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude, dropoff_longitude,
dropoff_latitude, passenger_count, trip_distance, tolls_amount,
fare_amount, total_amount
FROM `nyc-tlc.yellow.trips`
LIMIT 10
%%bigquery trips
SELECT
FORMAT_TIMESTAMP("%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude,
dropoff_longitude, dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
print(len(trips))
# We can slice Pandas dataframes as if they were arrays
trips[:10]
ax = sns.regplot(x="trip_distance", y="fare_amount", fit_reg=False, ci=None, truncate=True, data=trips)
ax.figure.set_size_inches(10, 8)
%%bigquery trips
SELECT
FORMAT_TIMESTAMP("%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude,
dropoff_longitude, dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
AND trip_distance > 0 AND fare_amount >= 2.5
print(len(trips))
ax = sns.regplot(x="trip_distance", y="fare_amount", fit_reg=False, ci=None, truncate=True, data=trips)
ax.figure.set_size_inches(10, 8)
tollrides = trips[trips['tolls_amount'] > 0]
tollrides[tollrides['pickup_datetime'] == '2012-02-27 09:19:10 UTC']
trips.describe()
def showrides(df, numlines):
lats = []
lons = []
for iter, row in df[:numlines].iterrows():
lons.append(row['pickup_longitude'])
lons.append(row['dropoff_longitude'])
lons.append(None)
lats.append(row['pickup_latitude'])
lats.append(row['dropoff_latitude'])
lats.append(None)
sns.set_style("darkgrid")
plt.figure(figsize=(10,8))
plt.plot(lons, lats)
showrides(trips, 10)
showrides(tollrides, 10)
def preprocess(trips_in):
trips = trips_in.copy(deep=True)
trips.fare_amount = trips.fare_amount + trips.tolls_amount
del trips['tolls_amount']
del trips['total_amount']
del trips['trip_distance'] # we won't know this in advance!
qc = np.all([\
trips['pickup_longitude'] > -78, \
trips['pickup_longitude'] < -70, \
trips['dropoff_longitude'] > -78, \
trips['dropoff_longitude'] < -70, \
trips['pickup_latitude'] > 37, \
trips['pickup_latitude'] < 45, \
trips['dropoff_latitude'] > 37, \
trips['dropoff_latitude'] < 45, \
trips['passenger_count'] > 0,
], axis=0)
return trips[qc]
tripsqc = preprocess(trips)
tripsqc.describe()
shuffled = tripsqc.sample(frac=1)
trainsize = int(len(shuffled['fare_amount']) * 0.70)
validsize = int(len(shuffled['fare_amount']) * 0.15)
df_train = shuffled.iloc[:trainsize, :]
df_valid = shuffled.iloc[trainsize:(trainsize+validsize), :]
df_test = shuffled.iloc[(trainsize+validsize):, :]
df_train.head(n=1)
df_train.describe()
df_valid.describe()
df_test.describe()
def to_csv(df, filename):
    """Write *df* to *filename* as a headerless, index-free CSV.

    Adds a row-number 'key' column and moves 'fare_amount' (the target)
    to the first column, the layout expected by the training pipeline.
    """
    outdf = df.copy(deep=False)
    outdf.loc[:, 'key'] = np.arange(0, len(outdf))  # row number as key
    # reorder columns so that the target is the first column
    cols = outdf.columns.tolist()
    cols.remove('fare_amount')
    cols.insert(0, 'fare_amount')
    print(cols)  # new order of columns
    outdf = outdf[cols]
    outdf.to_csv(filename, header=False, index_label=False, index=False)
to_csv(df_train, 'taxi-train.csv')
to_csv(df_valid, 'taxi-valid.csv')
to_csv(df_test, 'taxi-test.csv')
!head -10 taxi-valid.csv
!ls -l *.csv
%%bash
head taxi-train.csv
def distance_between(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two points ("as the crow flies";
    taxis can't fly, so this underestimates street distance).

    Uses the spherical law of cosines; accepts scalars or aligned
    numpy/pandas arrays.
    """
    # minimum(1, ...) clips floating-point noise so arccos stays in-domain
    central_angle = np.arccos(np.minimum(1,
        np.sin(np.radians(lat1)) * np.sin(np.radians(lat2))
        + np.cos(np.radians(lat1)) * np.cos(np.radians(lat2))
        * np.cos(np.radians(lon2 - lon1))))
    # degrees -> arc-minutes (x60) -> miles-ish (x1.515) -> km (x1.609344).
    # NOTE(review): 1.515 looks like a typo for 1.1515 (statute miles per
    # nautical mile) and inflates distances ~32%; kept as-is because the
    # hard-coded benchmark rate downstream was calibrated against it.
    dist = np.degrees(central_angle) * 60 * 1.515 * 1.609344
    return dist
def estimate_distance(df):
    """Pickup-to-dropoff great-circle distance (km) for every row of *df*."""
    return distance_between(df['pickuplat'], df['pickuplon'],
                            df['dropofflat'], df['dropofflon'])
def compute_rmse(actual, predicted):
    """Root-mean-squared error between two aligned numeric arrays."""
    return np.sqrt(np.mean((actual - predicted) ** 2))
def print_rmse(df, rate, name):
    """Print the RMSE of the rate-times-distance fare estimate on *df*.

    Args:
        df: frame with 'fare_amount' and the pickup/dropoff coordinates.
        rate: benchmark $/km rate.
        name: label for the printed line (e.g. 'Train').
    """
    rmse = compute_rmse(df['fare_amount'], rate * estimate_distance(df))
    print("{1} RMSE = {0}".format(rmse, name))
# Column layout of the benchmark CSVs: target first, then the pickup
# timestamp, the model features, and finally the synthetic row key.
FEATURES = ['pickuplon','pickuplat','dropofflon','dropofflat','passengers']
TARGET = 'fare_amount'
columns = list([TARGET])
columns.append('pickup_datetime')
columns.extend(FEATURES)  # features follow the timestamp; target stays first
columns.append('key')
# Reload the three splits; the CSV files carry no header row.
df_train = pd.read_csv('taxi-train.csv', header=None, names=columns)
df_valid = pd.read_csv('taxi-valid.csv', header=None, names=columns)
df_test = pd.read_csv('taxi-test.csv', header=None, names=columns)
# Benchmark model: a single $/km rate calibrated on the training split.
rate = df_train['fare_amount'].mean() / estimate_distance(df_train).mean()
print ("Rate = ${0}/km".format(rate))
print_rmse(df_train, rate, 'Train')
print_rmse(df_valid, rate, 'Valid')
print_rmse(df_test, rate, 'Test')
# Out-of-sample validation extraction: a different FARM_FINGERPRINT hash
# bucket (= 2, modulo 10000) than the training pull, same quality filters.
# The surrounding triple quotes were restored; the source had lost them.
validation_query = """
SELECT
  (tolls_amount + fare_amount) AS fare_amount,
  pickup_datetime,
  pickup_longitude AS pickuplon,
  pickup_latitude AS pickuplat,
  dropoff_longitude AS dropofflon,
  dropoff_latitude AS dropofflat,
  passenger_count*1.0 AS passengers,
  'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
  AND trip_distance > 0
  AND fare_amount >= 2.5
  AND pickup_longitude > -78
  AND pickup_longitude < -70
  AND dropoff_longitude > -78
  AND dropoff_longitude < -70
  AND pickup_latitude > 37
  AND pickup_latitude < 45
  AND dropoff_latitude > 37
  AND dropoff_latitude < 45
  AND passenger_count > 0
"""
client = bigquery.Client()
df_valid = client.query(validation_query).to_dataframe()
print_rmse(df_valid, 2.59988, 'Final Validation Set')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Please ignore any compatibility warnings and errors
Step2: <h3> Extract sample data from BigQuery </h3>
Step3: Let's increase the number of records so that we can do some neat graphs. There is no guarantee about the order in which records are returned, and so no guarantee about which records get returned if we simply increase the LIMIT. To properly sample the dataset, let's use the HASH of the pickup time and return 1 in 100,000 records -- because there are 1 billion records in the data, we should get back approximately 10,000 records if we do this.
Step4: <h3> Exploring data </h3>
Step5: Hmm ... do you see something wrong with the data that needs addressing?
Step6: What's up with the streaks around 45 dollars and 50 dollars? Those are fixed-amount rides from JFK and La Guardia airports into anywhere in Manhattan, i.e. to be expected. Let's list the data to make sure the values look reasonable.
Step7: Looking a few samples above, it should be clear that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. So, we'll use the sum of fare_amount + tolls_amount as what needs to be predicted. Tips are discretionary and do not have to be included in our fare estimation tool.
Step8: Hmm ... The min, max of longitude look strange.
Step9: As you'd expect, rides that involve a toll are longer than the typical ride.
Step10: The quality control has removed about 300 rows (11400 - 11101) or about 3% of the data. This seems reasonable.
Step11: Let's write out the three dataframes to appropriately named csv files. We can use these csv files for local training (recall that these files represent only 1/100,000 of the full dataset) just to verify our code works, before we run it on all the data.
Step12: <h3> Verify that datasets exist </h3>
Step13: We have 3 .csv files corresponding to train, valid, test. The ratio of file-sizes correspond to our split of the data.
Step14: Looks good! We now have our ML datasets and are ready to train ML models, validate them and evaluate them.
Step16: <h2>Benchmark on same dataset</h2>
|
10,395
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
#Loads important files for this analysis
filename = 'baseballdatabank-2017.1\core\Teams.csv'
teams_df = pd.read_csv(filename)
filename = 'baseballdatabank-2017.1\core\Salaries.csv'
salaries_df = pd.read_csv(filename)
filename = 'baseballdatabank-2017.1\core\Master.csv'
master_df = pd.read_csv(filename)
filename = 'baseballdatabank-2017.1\core\CollegePlaying.csv'
college_df = pd.read_csv(filename)
filename = 'baseballdatabank-2017.1\core\Appearances.csv'
appearances_df = pd.read_csv(filename)
# In the master table, flag every player who appears in the college table.
went = college_df.playerID.unique()
# append the new column to the original DataFrame
master_df['went_to_college'] = master_df.playerID.isin(went)
# split players into those who attended college and those who didn't
college_playing = master_df[master_df['went_to_college']]
non_college_playing = master_df[master_df['went_to_college']==False]
# now get the salaries of the players to describe and compare;
# add the same boolean flag to the salaries table so it can be split
# (the flag is only used for filtering, not appended back to master)
salaries_df['went_to_college'] = salaries_df.playerID.isin(went)
college_player_salaries = salaries_df[salaries_df['went_to_college']]
non_college_player_salaries = salaries_df[salaries_df['went_to_college']==False]
# Salaries on year 2016
college_salaries2016 = college_player_salaries[college_player_salaries.yearID==2016]
non_college_salaries2016 = non_college_player_salaries[non_college_player_salaries.yearID==2016]
# Salaries on year 2015
college_salaries2015 = college_player_salaries[college_player_salaries.yearID==2015]
non_college_salaries2015 = non_college_player_salaries[non_college_player_salaries.yearID==2015]
# Salaries on year 2014
college_salaries2014 = college_player_salaries[college_player_salaries.yearID==2014]
non_college_salaries2014 = non_college_player_salaries[non_college_player_salaries.yearID==2014]
def put_salaries_hist(row, col, df, axes, lbl, year):
    """Draw a salary histogram with a dashed line at the group mean.

    Args:
        row, col: position of the target subplot within *axes*.
        df: DataFrame with a ``salary`` column.
        axes: 2-D array of matplotlib Axes.
        lbl: title prefix ('' or 'Non ').
        year: year shown in the title.
    """
    titlestr = '{}College Players, year {}'
    xlabel = '\nIncome x10 Million Dollars\n\n'
    # BUG FIX: index by the `row` parameter; the original used the global
    # `ax`, which only worked because the caller happened to pass it.
    target = axes[row, col]
    target.title.set_text(titlestr.format(lbl, year))
    target.set_ylabel('Count')
    # express the average salary in millions of dollars for the legend
    meanmd = df.salary.values.mean() / 1000000
    target.set_xlabel(xlabel)
    target.hist(df.salary.values)
    # dashed vertical line marking the group average
    target.axvline(df.salary.values.mean(), color='c', linestyle='dashed',
                   linewidth=2, label='Avg ${0:.2f} MD'.format(meanmd))
    legend = target.legend(loc='upper center', shadow=True)
%pylab inline
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(9, 10), sharey=True,sharex=False,edgecolor='k')
#put the resulting DataFrames into lists to easily get the charts
ylist = ['2014','2015','2016']
alist = [college_salaries2014 , college_salaries2015, college_salaries2016]
blist = [non_college_salaries2014 , non_college_salaries2015, non_college_salaries2016]
#row counter
ax = 0
#string to put label on each chart
for ax in range( len(alist)):
#the figure has 2 columns, the column on the left has the colleger players data
# the column on the right has the non college players data
year = ylist[ax]
#college players salaries
adf = alist[ax]
put_salaries_hist(ax,0,adf,axes,'',year)
#non college players salaries
adf = blist[ax]
put_salaries_hist(ax,1,adf,axes,'Non ',year)
ax = ax + 1
fig.subplots_adjust(hspace = 1.1,wspace=0.8)
#get the avegare salary by year
#year average wages for college player
clgmeans = college_player_salaries.groupby('yearID').salary.mean()
#year average wages for non college player
nclgmeans = non_college_player_salaries.groupby('yearID').salary.mean()
#year average wages for all player
allmean = salaries_df.groupby('yearID').salary.mean()
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 7), sharex=False)
#Draw a plot chart to contain the three results
axes.title.set_text('Average salary for all playes from 1985 ot 2016')
axes.plot(clgmeans,label='College', marker='o')
axes.plot(nclgmeans, label='Non College')
axes.plot(allmean, label='All', marker='+')
axes.set_xlabel('Year')
axes.set_ylabel('Player income')
legend = axes.legend(loc='upper center', shadow=True)
def count_college_players(row):
    """Count the college players a team fielded in a given year.

    Only players whose college attendance is on or before the record's
    year are counted.  Designed for ``teams_df.apply(..., axis=1)``.

    Args:
        row: a row of ``teams_df`` providing ``teamID`` and ``yearID``.

    Returns:
        int: number of distinct college players on that team that year.
    """
    teamID = row['teamID']
    yearID = row['yearID']
    # roster of that team for that season
    yrteampls = appearances_df[appearances_df['yearID'] == yearID]
    yrteampls = yrteampls[yrteampls['teamID'] == teamID]
    # college stints that had already happened by that season
    clgplyr = college_df[college_df['yearID'] <= yearID]
    # inner join keeps roster entries that match some college stint
    merged = yrteampls.merge(clgplyr, on=['playerID'], how='inner')
    # BUG FIX: a player appears once per college year in college_df, so
    # len(merged) overcounts players with multiple stints; count distinct
    # players instead, matching the stated intent.
    return int(merged['playerID'].nunique())
#count the number of college players each team had each year
teams_df['college_player_count'] = teams_df.apply(count_college_players,axis=1)
#get two groups, winners of World Series and Non winners
winnners_clg_plyrs = teams_df[teams_df.WSWin=='Y']
not_winnners_clg_plyrs = teams_df[teams_df.WSWin=='N']
def put_college_winners_scatter(col, axes, df, title):
    """Scatter-plot college-player counts per year on subplot *col*.

    Args:
        col: column index of the target subplot within *axes*.
        axes: 1-D array of matplotlib Axes.
        df: DataFrame with 'yearID' and 'college_player_count' columns.
        title: title for the subplot.
    """
    axes[col].title.set_text(title)
    axes[col].scatter(df['yearID'].values,
                      df['college_player_count'].values)
    axes[col].set_xlabel('Year')
    axes[col].set_ylabel('Number of college players')
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5), sharey=True,sharex=False)
put_college_winners_scatter(0,axes,winnners_clg_plyrs,
'Winners of World Series and\nnumber of college players')
put_college_winners_scatter(1,axes,not_winnners_clg_plyrs,
'Non winners of World Series and\nnumber of college players')
winnners_clg_plyrs_b1910 = winnners_clg_plyrs[winnners_clg_plyrs['yearID']<=1910]
not_winnners_clg_plyrs_b1910 = not_winnners_clg_plyrs[not_winnners_clg_plyrs['yearID']<=1910]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5), sharey=True,sharex=False)
put_college_winners_scatter(0,axes,winnners_clg_plyrs_b1910,
'Winners of World Series and\nnumber of college players\nbefore 1910')
put_college_winners_scatter(1,axes,not_winnners_clg_plyrs_b1910,
'Non winners of World Series and\nnumber of college players\nbefore 1910')
winnners_clg_plyrs_50_70 = winnners_clg_plyrs[winnners_clg_plyrs['yearID']>=1950]
winnners_clg_plyrs_50_70 = winnners_clg_plyrs_50_70[winnners_clg_plyrs_50_70['yearID']<=1970]
not_winnners_clg_plyrs_50_70 = not_winnners_clg_plyrs[not_winnners_clg_plyrs['yearID']>=1950]
not_winnners_clg_plyrs_50_70 = not_winnners_clg_plyrs_50_70[not_winnners_clg_plyrs_50_70['yearID']<=1970]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5), sharey=True,sharex=False)
put_college_winners_scatter(0,axes,winnners_clg_plyrs_50_70,
'Winners of World Series and\nnumber of college players\nbeetwen 1950 and 1970')
put_college_winners_scatter(1,axes,not_winnners_clg_plyrs_50_70,
'Non winners of World Series and\nnumber of college players\nbeetwen 1950 and 1970')
#the teams_df now has a column with the count of college players
teams_df = teams_df.sort_values(by='yearID', ascending=False)
teams_clgplr2016 = teams_df[teams_df['yearID']==2016]
x = teams_clgplr2016.W.values
y = teams_clgplr2016.college_player_count.values
fig, axScatter = plt.subplots(figsize=(5.5, 5.5))
axScatter.title.set_text('Games won by team and number of college players\nOn year 2016')
axScatter.scatter(x,y,label='a label')
axScatter.set_xlabel('Games Won')
axScatter.set_ylabel('Number of college players')
teams_clgplr20yrs = teams_df[teams_df['yearID']>=(2016-20)]
x = teams_clgplr20yrs.W.values
y = teams_clgplr20yrs.college_player_count.values
fig, axScatter = plt.subplots(figsize=(5.5, 5.5))
axScatter.title.set_text('Games won by team and number of college players\nFrom 1996 to 2016')
axScatter.scatter(x,y,label='a label')
axScatter.set_xlabel('Games Won')
axScatter.set_ylabel('Number of college players')
#load the CPI data
filename = 'cpi_1985_2017.csv'
cpi_df = pd.read_csv(filename)
def get_year_cpi(year):
    """Return the CPI for *year* from cpi_df, or 0 when the year is absent."""
    try:
        return cpi_df[cpi_df['yearID'] == year].cpi.values[0]
    except IndexError:
        # narrowed from a bare `except:`; only "year missing" is expected
        return 0
current_cpi = get_year_cpi(2017)
def convert_wages(row):
    """Convert one salary row to today's dollars using the CPI ratio.

    Args:
        row: row with 'yearID' and 'salary' fields.

    Returns:
        The salary expressed in current (``current_cpi``) dollars.
    """
    year = row['yearID']
    old_value = row['salary']
    old_cpi = get_year_cpi(year)
    # NOTE(review): get_year_cpi returns 0 for unknown years, which would
    # divide by zero here -- confirm every salary year exists in cpi_df.
    today_value = old_value * (current_cpi / old_cpi)
    return today_value
salaries_df['todays_salary'] = salaries_df.apply(convert_wages, axis=1)
allmean_cpi_adj = salaries_df.groupby('yearID').todays_salary.mean()
clgmeans = salaries_df[salaries_df['went_to_college']].groupby('yearID').todays_salary.mean()
nclgmeans = salaries_df[salaries_df['went_to_college'] == False].groupby('yearID').todays_salary.mean()
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 7), sharex=False)
#Draw a plot chart to contain the three results
axes.title.set_text('Average salary for all playes from 1985 ot 2016\n Adjusted for inflation')
axes.plot(clgmeans,label='College', marker='o')
axes.plot(nclgmeans, label='Non College')
axes.plot(allmean_cpi_adj, label='All', marker='+')
axes.set_xlabel('Year')
axes.set_ylabel('Player income')
legend = axes.legend(loc='upper center', shadow=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: At first glance
Step5: 1. On a yearly basis, do baseball players who attended college have greater income than those who didn't?
Step6: 2. Does having college-graduated players give a team a better chance of winning the World Series?
Step7: There are some interesting spots in the charts, there are gaps between <br>
Step8: In this case for year 2016, those teams with more college students <br>
Step10: Final Conclusion
|
10,396
|
<ASSISTANT_TASK:>
Python Code:
import graphviz as gv
def heapToDot(A):
    """Render the heap array *A* as a graphviz Digraph.

    Each entry (p, o) becomes a record node; when the priority and the
    object differ, the node also shows the object's mIndex and its array
    position.  Edges connect each parent to its children.
    """
    dot = gv.Digraph(node_attr={'shape': 'record'})
    for k, (p, o) in enumerate(A):
        if str(p) != str(o):
            fields = [str(p), str(o), str(o.mIndex), str(k)]
        else:
            fields = [str(p), str(k)]
        dot.node(str(k), label='{' + '|'.join(fields) + '}', style='rounded')
    n = len(A)
    for k in range(n // 2):
        for child in (2 * k + 1, 2 * k + 2):
            if child < n:
                dot.edge(str(k), str(child))
    return dot
def swap(A, i, j):
    """Exchange heap entries at i and j, keeping each object's mIndex current."""
    A[i], A[j] = A[j], A[i]
    A[i][1].mIndex = i  # back-pointers must match the new positions
    A[j][1].mIndex = j
def ascend(A, k):
    """Bubble the entry at index k up while it is smaller than its parent.

    Returns the index at which the entry comes to rest.
    """
    while k > 0:
        p = (k - 1) // 2  # parent index
        if A[k] < A[p]:
            swap(A, p, k)
            k = p
        else:
            return k
    return 0
def descend(A):
    """Sift the root of heap *A* down until the min-heap property holds."""
    n = len(A) - 1  # index of the last entry
    k = 0
    while 2 * k + 1 <= n:
        j = 2 * k + 1  # left child
        if j + 1 <= n and A[j] > A[j + 1]:
            j += 1  # right child is the smaller one
        if A[k] < A[j]:
            return
        swap(A, k, j)
        k = j
def insert(H, x):
    """Append the (priority, object) pair *x* to heap *H* and restore order."""
    n = len(H)
    H.append(x)
    _, o = x
    o.mIndex = n
    # ascend() keeps mIndex current via swap(); store the final resting index
    k = ascend(H, n)
    o.mIndex = k
def elevate(H, o, p):
    """Raise object *o*'s priority to *p* (a smaller value) and re-heapify."""
    k = o.mIndex
    H[k] = (p, o)
    ascend(H, k)
def remove(H):
    """Pop and return the minimum (priority, object) pair from heap *H*."""
    pFirst, oFirst = H[0]
    pLast, oLast = H[-1]
    # move the last entry to the root, shrink, then sift it down
    oLast.mIndex = 0
    H[0] = pLast, oLast
    H.pop(-1)
    descend(H)
    return pFirst, oFirst
class Node:
    """Payload object stored in the heap; tracks its position via ``mIndex``."""

    def __init__(self, value):
        # value used for display, hashing, equality, and ordering
        self.mValue = value

    def __hash__(self):
        return hash(self.mValue)

    def __repr__(self):
        try:
            return 'Node(' + str(self.mValue) + ':' + str(self.mIndex) + ')'
        except AttributeError:  # mIndex exists only once the node is in a heap
            return 'Node(' + str(self.mValue) + ')'

    def __str__(self):
        return str(self.mValue)

    def __eq__(self, other):
        # identity-style None check (was the unidiomatic `other == None`);
        # NotImplemented lets Python fall back instead of crashing with an
        # AttributeError on non-Node operands
        if other is None:
            return False
        if not isinstance(other, Node):
            return NotImplemented
        return self.mValue == other.mValue

    def __lt__(self, other):
        if not isinstance(other, Node):
            return NotImplemented
        return self.mValue < other.mValue
import string
def demo1():
    """Build a heap of the alphabet, then elevate 'w' and show both states."""
    L = [(i, Node(c)) for i, c in enumerate(string.ascii_lowercase)]
    _, w = L[22]  # the node carrying 'w'
    H = []
    for x in L:
        insert(H, x)
    # NOTE(review): placement reconstructed from flattened source; shows
    # the heap once after all insertions, then once after the elevation.
    display(heapToDot(H))
    print('Elevating "w" to priority 2:')
    elevate(H, w, 2)
    display(heapToDot(H))
demo1()
import random as rnd
def heap_sort(L):
    """Heap-sort a list of (priority, Node) pairs, displaying each heap state.

    Returns:
        list: the priorities in ascending order.
    """
    H = []
    for x in L:
        insert(H, x)
        # NOTE(review): placement reconstructed from flattened source;
        # visualizes the heap after every insertion and every removal.
        display(heapToDot(H))
    S = []
    while H != []:
        p, _ = remove(H)
        display(heapToDot(H))
        S.append(p)
    return S
def demo2():
    """Heap-sort twelve random numbers, printing the input and the result."""
    L = [rnd.randrange(1, 200) for n in range(12)]
    L = [(n, Node(n)) for n in L]
    print("L = ", L)
    S = heap_sort(L)
    print("S = ", S)
demo2()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function toDot takes four arguments
Step2: Priority Queues implemented as Heaps
Step3: The function ascend takes two arguments
Step4: The function descend(A) takes one argument A where A is an array that is organized as a heap.
Step5: Implementing the API
Step6: The function elevate(H, o, p) takes three arguments.
Step7: Testing
|
10,397
|
<ASSISTANT_TASK:>
Python Code:
# Use a question mark after a command to get more info about it
a = [1, 2, 3]
a.extend?
# Use double question marks to get the source code if available
def myfunc(x):
    """Fancy function"""
    # docstring quotes restored; the flattened source had dropped them
    return x + 2
myfunc??
# Magic commands start with a % sign
%timeit print "foo"
# You can add parameters to magic commands, just like you would do for commandline commands
%timeit -n 5 print "foo" # Only do 5 loops, instead of the default 10k
# You can also ask for help for magic commands using the question mark
%timeit?
# See a quick reference of other commands:
%quickref
# There are a lot more specicial commands
%ls # same as linux ls command: list files in directory
# Run an external script
%run myscript.py
# If you forget to store the result of your last statement in a variable,
# you can always access it using the special underscore variable, like so:
5+3
_
# You can also access previous results by referencing the output number using _X
# where X is the output cell you want to reference:
_39
# Or the input number using _iX
_i36
# You can also easily execute OS commands using !
!ls -lh
output = !ls -lh
output
# You can reference other variables by prefixing them by a dollar sign $
dirname = "/tmp"
!ls $dirname
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: IPython/Jupyter
Step2: Magic Commands
Step3: Executing shell commands
|
10,398
|
<ASSISTANT_TASK:>
Python Code:
import bigbang.mailman as mailman
import bigbang.graph as graph
import bigbang.process as process
from bigbang.parse import get_date
#from bigbang.functions import *
from bigbang.archive import Archive
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import numpy as np
import math
import pytz
import pickle
import os
pd.options.display.mpl_style = 'default' # pandas has a set of preferred graph formatting options
arx = Archive("https://lists.wikimedia.org/pipermail/wikimedia-l/",archive_dir="../archives")
acts = arx.get_activity()
# number of distinct active senders per day
participants = (acts > 0).sum(1)
window = 20  # days in the rolling average
plt.figure(figsize=(12.5, 7.5))
# BUG FIX: pd.rolling_mean() was removed from pandas; the Series.rolling()
# accessor with .mean() is the equivalent modern spelling.
rmpa = participants.rolling(window).mean()
rmpadna = rmpa.dropna()
plt.plot_date(rmpadna.index,
              rmpadna.values,
              'r',
              xdate=True)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Also, let's import a number of other dependencies we'll use later.
Step2: Now let's load the data for analysis. Load the Archive, the get the count of number of emails sent per day (the 'activity').
Step3: How can we plot the number of participants over time?
Step4: This variable is for the range of days used in computing rolling averages.
Step5: For each of the mailing lists we are looking at, plot the rolling average of number of emails sent per day.
|
10,399
|
<ASSISTANT_TASK:>
Python Code:
import time
def timer():
    """Return a stopwatch closure started now.

    The closure returns the seconds elapsed since creation; pass
    ``report=True`` to also print the elapsed time.
    """
    start = time.time()

    def f(report=False):
        elapsed = time.time() - start
        if report:
            print("time elapsed %5.3f" % elapsed)
        return elapsed

    return f
# Which numbers below 100 cannot be written as 4*i + 9*j?
# Upper bound on the coefficients tried.
limit = 250

# "Square" enumeration: try every (i, j) pair independently.
mytimer = timer()
can_reach = set()
numbers = set(range(1, 100))
for i in range(0, limit):
    for j in range(0, limit):
        num = 4 * i + 9 * j
        can_reach = can_reach | {num}
can_not_reach = numbers - can_reach  # the unreachable numbers below 100
t_sqr = mytimer(True)
can_not_reach
# "Triangle" enumeration: iterate over the total term count i and split it
# as j fours and (i - j) nines, roughly halving the square version's work.
mytimer = timer()
can_reach = set()
numbers = set(range(1, 100))
for i in range(0, limit):
    for j in range(0, i + 1):
        num = 4 * j + 9 * (i - j)
        # print("%i = 4*%i + 9*%i" % (num, j, i - j))
        can_reach = can_reach | {num}
can_not_reach = numbers - can_reach
t_tria = mytimer(True)
can_not_reach
from operator import add
from itertools import product
# Functional version: Cartesian product of the multiples of 4 and of 9,
# summed pairwise, then subtracted from the numbers below 100.
mytimer = timer()
fours = range(0,4*limit,4)
nines = range(0,9*limit,9)
fours_and_nines = product(fours, nines)
fours_plus_nines = map( lambda x: add (*x), fours_and_nines)
set(range(100)) - set(fours_plus_nines)
t_func = mytimer(True)
# Relative speed-ups of the three approaches.
print ("triangle sum vs square sum = %2.0fx" % (t_sqr / t_tria))
print ("functional vs triangle sum = %2.0fx" % (t_tria / t_func))
print ("functional vs square sum = %2.0fx" % (t_sqr / t_func))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We are only looking for combinations with $i<limit$
Step2: Procedural Style - square
Step3: Procedural Style - triangle
Step4: Functional Style
Step5: Results
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.