text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Chunking an Image Dataset for Minibatch Training using NumPy NPZ Archives
Step2: Note that the names of the subdirectories correspond directly to the class label of the images that are stored under it.
Step3: Note
Step4: The .npz files we created are stored under a new directory, mnist_npz
Step5: To check that the archiving worked correctly, we will now load one of those .npz archives. Note that we can now access each archive just like a python dictionary. Here the 'data' key contains the image data and the 'labels' key stores an array containing the corresponding class labels
Step6: 2. Loading Minibatches
Step7: The following for loop will iterate over the 45,000 training examples in our MNIST training set, yielding 50 images and labels at a time (note that we previously set aside 5,000 training examples as our validation dataset).
Step8: As we can see from the benchmark above, an iteration over one training epoch (45k images) is relatively fast.
Step9: Training the Neural Network with Minibatches
|
<ASSISTANT_TASK:>
Python Code:
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p tensorflow,numpy
# Note that executing the following code
# cell will download the MNIST dataset
# and save all the 60,000 images as separate JPEG
# files. This might take a few minutes depending
# on your machine.
import numpy as np
from helper import mnist_export_to_jpg
np.random.seed(123)
mnist_export_to_jpg(path='./')
import os
for i in ('train', 'valid', 'test'):
print('mnist_%s subdirectories' % i, os.listdir('mnist_%s' % i))
%matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
some_img = os.path.join('./mnist_train/9/', os.listdir('./mnist_train/9/')[0])
img = mpimg.imread(some_img)
print(img.shape)
plt.imshow(img, cmap='binary');
import numpy as np
import glob
def images_to_pickles(data_stempath='./mnist_', which_set='train',
                      archive_size=5000, width=28, height=28, channels=1,
                      shuffle=False, seed=None):
    """Chunk the JPEG images of one dataset split into NumPy .npz archives.

    Walks ``<data_stempath><which_set>/<label>/*.jpg``, reads each image,
    and writes archives of `archive_size` images each into the directory
    ``<data_stempath>npz`` as ``<which_set>_<n>.npz``, with keys 'data'
    (uint8, shape [archive_size, height, width, channels]) and 'labels'.

    Parameters
    ----------
    data_stempath : str
        Common path prefix of the image directories (e.g. './mnist_').
    which_set : str
        Dataset split to archive ('train', 'valid', or 'test').
    archive_size : int
        Number of images stored per .npz archive.
    width, height, channels : int
        Expected image dimensions.
    shuffle : bool
        If True, shuffle the image paths before archiving.
    seed : int or None
        Seed for the shuffling RNG.

    Notes
    -----
    A trailing partial chunk (fewer than `archive_size` images) is not
    written out, matching the original behavior.
    """
    npz_dir = '%snpz' % data_stempath
    if not os.path.exists(npz_dir):
        os.mkdir(npz_dir)
    img_paths = [p for p in glob.iglob('%s%s/**/*.jpg' %
                 (data_stempath, which_set), recursive=True)]
    if shuffle:
        # Bug fix: RandomState.shuffle works in place and returns None,
        # so its result must not be assigned (the original bound None to
        # an unused name and relied on the in-place side effect).
        rgen = np.random.RandomState(seed)
        rgen.shuffle(img_paths)
    idx, file_idx = 0, 1
    data = np.zeros((archive_size, height, width, channels), dtype=np.uint8)
    labels = np.zeros(archive_size, dtype=np.uint8)
    for path in img_paths:
        # The class label is encoded as the name of the parent directory.
        label = int(os.path.basename(os.path.dirname(path)))
        image = mpimg.imread(path)
        if len(image.shape) == 2:
            # Grayscale image: add a trailing channel axis.
            data[idx] = image[:, :, np.newaxis]
        labels[idx] = label
        idx += 1
        if idx == archive_size:
            # Archive is full: write it out and start the next one.
            # Bug fix: the original tested `idx >= archive_size - 1`
            # *before* filling, so slot archive_size-1 of every archive
            # stayed zeros and one image per chunk was dropped.
            savepath = os.path.join(npz_dir, '%s_%d.npz' %
                                    (which_set, file_idx))
            np.savez(savepath, data=data, labels=labels)
            file_idx += 1
            idx = 0
images_to_pickles(which_set='train', shuffle=True, seed=1)
images_to_pickles(which_set='valid', shuffle=True, seed=1)
images_to_pickles(which_set='test', shuffle=True, seed=1)
os.listdir('mnist_npz')
data = np.load('mnist_npz/test_1.npz')
print(data['data'].shape)
print(data['labels'].shape)
plt.imshow(data['data'][0][:, :, -1], cmap='binary');
print('Class label:', data['labels'][0])
class BatchLoader():
    """Iterate over pre-chunked MNIST .npz archives in minibatches.

    Scans `minibatches_path` for archives whose file names contain
    'train', 'valid', or 'test' and exposes one epoch generator per
    split.  Each yielded minibatch is an iterable of (data, labels).

    Parameters
    ----------
    minibatches_path : str
        Directory containing the .npz archives.
    normalize : bool
        If True, scale image data from [0, 255] uint8 to [0, 1] float32.
    """

    def __init__(self, minibatches_path,
                 normalize=True):
        self.normalize = normalize
        self.train_batchpaths = [os.path.join(minibatches_path, f)
                                 for f in os.listdir(minibatches_path)
                                 if 'train' in f]
        self.valid_batchpaths = [os.path.join(minibatches_path, f)
                                 for f in os.listdir(minibatches_path)
                                 if 'valid' in f]
        # Bug fix: the original filtered on 'train' here, so the "test"
        # loader silently iterated over the *training* archives.
        self.test_batchpaths = [os.path.join(minibatches_path, f)
                                for f in os.listdir(minibatches_path)
                                if 'test' in f]
        # Hard-coded MNIST split sizes -- TODO confirm against archives.
        self.num_train = 45000
        self.num_valid = 5000
        self.num_test = 10000
        self.n_classes = 10

    def load_train_epoch(self, batch_size=50, onehot=False,
                         shuffle_within=False, shuffle_paths=False,
                         seed=None):
        """Yield one epoch of training minibatches."""
        yield from self._load_epoch(which='train',
                                    batch_size=batch_size,
                                    onehot=onehot,
                                    shuffle_within=shuffle_within,
                                    shuffle_paths=shuffle_paths,
                                    seed=seed)

    def load_test_epoch(self, batch_size=50, onehot=False,
                        shuffle_within=False, shuffle_paths=False,
                        seed=None):
        """Yield one epoch of test minibatches."""
        yield from self._load_epoch(which='test',
                                    batch_size=batch_size,
                                    onehot=onehot,
                                    shuffle_within=shuffle_within,
                                    shuffle_paths=shuffle_paths,
                                    seed=seed)

    def load_validation_epoch(self, batch_size=50, onehot=False,
                              shuffle_within=False, shuffle_paths=False,
                              seed=None):
        """Yield one epoch of validation minibatches."""
        yield from self._load_epoch(which='valid',
                                    batch_size=batch_size,
                                    onehot=onehot,
                                    shuffle_within=shuffle_within,
                                    shuffle_paths=shuffle_paths,
                                    seed=seed)

    def _load_epoch(self, which='train', batch_size=50, onehot=False,
                    shuffle_within=True, shuffle_paths=True, seed=None):
        """Yield (data, labels) minibatches for one pass over a split.

        Parameters
        ----------
        which : str
            Split to iterate: 'train', 'valid', or 'test'.
        batch_size : int
            Number of examples per minibatch; a trailing partial batch
            within an archive is dropped (original behavior).
        onehot : bool
            If True, one-hot encode the labels as uint8.
        shuffle_within : bool
            If True, shuffle example order inside each archive.
        shuffle_paths : bool
            If True, shuffle the order in which archives are visited.
        seed : int or None
            Seed for the shuffling RNG.
        """
        if which == 'train':
            paths = self.train_batchpaths
        elif which == 'valid':
            paths = self.valid_batchpaths
        elif which == 'test':
            paths = self.test_batchpaths
        else:
            raise ValueError('`which` must be "train", "valid", or "test".'
                             ' Got %s.' % which)
        rgen = np.random.RandomState(seed)
        if shuffle_paths:
            # Bug fix: shuffle() works in place and returns None; the
            # original assigned that None back to `paths` and then
            # crashed iterating it.  Shuffle a copy so the instance
            # attribute is left untouched.
            paths = list(paths)
            rgen.shuffle(paths)
        for batch in paths:
            dct = np.load(batch)
            if onehot:
                labels = (np.arange(self.n_classes) ==
                          dct['labels'][:, None]).astype(np.uint8)
            else:
                labels = dct['labels']
            if self.normalize:
                # normalize to [0, 1] range
                data = dct['data'].astype(np.float32) / 255.
            else:
                data = dct['data']
            arrays = [data, labels]
            del dct
            indices = np.arange(arrays[0].shape[0])
            if shuffle_within:
                rgen.shuffle(indices)
            for start_idx in range(0, indices.shape[0] - batch_size + 1,
                                   batch_size):
                index_slice = indices[start_idx:start_idx + batch_size]
                # Yield a generator of the two sliced arrays so callers
                # can unpack `for batch_x, batch_y in ...` (original API).
                yield (ary[index_slice] for ary in arrays)
batch_loader = BatchLoader(minibatches_path='./mnist_npz/',
normalize=True)
for batch_x, batch_y in batch_loader.load_train_epoch(batch_size=50, onehot=True):
print(batch_x.shape)
print(batch_y.shape)
break
cnt = 0
for batch_x, batch_y in batch_loader.load_train_epoch(batch_size=50, onehot=True):
cnt += batch_x.shape[0]
print('One training epoch contains %d images' % cnt)
def one_epoch():
for batch_x, batch_y in batch_loader.load_train_epoch(batch_size=50, onehot=True):
pass
% timeit one_epoch()
import tensorflow as tf
##########################
### SETTINGS
##########################
# Hyperparameters
learning_rate = 0.1
training_epochs = 15
batch_size = 100
# Architecture
n_hidden_1 = 128
n_hidden_2 = 256
height, width = 28, 28
n_classes = 10
##########################
### GRAPH DEFINITION
##########################
g = tf.Graph()
with g.as_default():
tf.set_random_seed(123)
# Input data
tf_x = tf.placeholder(tf.float32, [None, height, width, 1], name='features')
tf_x_flat = tf.reshape(tf_x, shape=[-1, height*width])
tf_y = tf.placeholder(tf.int32, [None, n_classes], name='targets')
# Model parameters
weights = {
'h1': tf.Variable(tf.truncated_normal([width*height, n_hidden_1], stddev=0.1)),
'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1)),
'out': tf.Variable(tf.truncated_normal([n_hidden_2, n_classes], stddev=0.1))
}
biases = {
'b1': tf.Variable(tf.zeros([n_hidden_1])),
'b2': tf.Variable(tf.zeros([n_hidden_2])),
'out': tf.Variable(tf.zeros([n_classes]))
}
# Multilayer perceptron
layer_1 = tf.add(tf.matmul(tf_x_flat, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
# Loss and optimizer
loss = tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=tf_y)
cost = tf.reduce_mean(loss, name='cost')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train = optimizer.minimize(cost, name='train')
# Prediction
correct_prediction = tf.equal(tf.argmax(tf_y, 1), tf.argmax(out_layer, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
##########################
### TRAINING & EVALUATION
##########################
batch_loader = BatchLoader(minibatches_path='./mnist_npz/',
normalize=True)
# preload small validation set
# by unpacking the generator
[valid_data] = batch_loader.load_validation_epoch(batch_size=5000,
onehot=True)
valid_x, valid_y = valid_data[0], valid_data[1]
del valid_data
with tf.Session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
avg_cost = 0.
n_batches = 0
for batch_x, batch_y in batch_loader.load_train_epoch(batch_size=batch_size,
onehot=True,
seed=epoch):
n_batches += 1
_, c = sess.run(['train', 'cost:0'], feed_dict={'features:0': batch_x,
'targets:0': batch_y.astype(np.int)})
avg_cost += c
train_acc = sess.run('accuracy:0', feed_dict={'features:0': batch_x,
'targets:0': batch_y})
valid_acc = sess.run('accuracy:0', feed_dict={'features:0': valid_x,
'targets:0': valid_y})
print("Epoch: %03d | AvgCost: %.3f" % (epoch + 1, avg_cost / n_batches), end="")
print(" | MbTrain/Valid ACC: %.3f/%.3f" % (train_acc, valid_acc))
# imagine test set is too large to fit into memory:
test_acc, cnt = 0., 0
for test_x, test_y in batch_loader.load_test_epoch(batch_size=100,
onehot=True):
cnt += 1
acc = sess.run(accuracy, feed_dict={'features:0': test_x,
'targets:0': test_y})
test_acc += acc
print('Test ACC: %.3f' % (test_acc / cnt))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The class <code>plot_error_surfaces</code> is just to help you visualize the data space and the parameter space during training and has nothing to do with PyTorch.
Step2: <!--Empty Space for separating topics-->
Step3: Generate values from -3 to 3 that create a line with a slope of 1 and a bias of -1. This is the line that you need to estimate. Add some noise to the data
Step4: Create a dataset object
Step5: Plot out the data and the line.
Step6: <!--Empty Space for separating topics-->
Step7: We will use PyTorch build-in functions to create a criterion function; this calculates the total loss or cost
Step8: Create a linear regression object and optimizer object, the optimizer object will use the linear regression object.
Step9: Remember to construct an optimizer you have to give it an iterable containing the parameters i.e. provide <code> model.parameters()</code> as an input to the object constructor
Step10: PyTorch randomly initialises your model parameters. If we use those parameters, the result will not be very insightful as convergence will be extremely fast. So we will initialise the parameters such that they will take longer to converge, i.e. look cool
Step11: Create a plotting object, not part of PyTorch, just used to help visualize
Step12: <!--Empty Space for separating topics-->
Step13: Let's use the following diagram to help clarify the process. The model takes <code>x</code> to produce an estimate <code>yhat</code>, it will then be compared to the actual <code>y</code> with the loss function.
|
<ASSISTANT_TASK:>
Python Code:
# These are the libraries we are going to use in the lab.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# class for ploting
# class for plotting the data space and the parameter (loss) space during
# training -- purely a matplotlib helper, not part of PyTorch
class plot_error_surfaces(object):
    """Visualize the MSE loss surface over (w, b) and the fit progress.

    Parameters
    ----------
    w_range, b_range : float
        Half-widths of the grid over the weight / bias axes.
    X, Y : torch.Tensor
        Training inputs and targets (converted to NumPy internally).
    n_samples : int
        Number of grid points per axis.
    go : bool
        If True, immediately draw the 3D surface and contour plots.
    """

    # Constructor
    def __init__(self, w_range, b_range, X, Y, n_samples=30, go=True):
        W = np.linspace(-w_range, w_range, n_samples)
        B = np.linspace(-b_range, b_range, n_samples)
        w, b = np.meshgrid(W, B)
        # Bug fix: size the grid by n_samples instead of the original's
        # hard-coded (30, 30), so other grid resolutions work too.
        Z = np.zeros((n_samples, n_samples))
        count1 = 0
        self.y = Y.numpy()
        self.x = X.numpy()
        for w1, b1 in zip(w, b):
            count2 = 0
            for w2, b2 in zip(w1, b1):
                # Bug fix: the model prediction is w*x + b, so the squared
                # error is (y - (w*x + b))**2.  The original computed
                # (y - w*x + b)**2, i.e. it *added* the bias term.
                Z[count1, count2] = np.mean((self.y - (w2 * self.x + b2)) ** 2)
                count2 += 1
            count1 += 1
        self.Z = Z
        self.w = w
        self.b = b
        self.W = []
        self.B = []
        self.LOSS = []
        self.n = 0
        if go == True:
            plt.figure()
            plt.figure(figsize=(7.5, 5))
            plt.axes(projection='3d').plot_surface(self.w, self.b, self.Z, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
            plt.title('Loss Surface')
            plt.xlabel('w')
            plt.ylabel('b')
            plt.show()
            plt.figure()
            plt.title('Loss Surface Contour')
            plt.xlabel('w')
            plt.ylabel('b')
            plt.contour(self.w, self.b, self.Z)
            plt.show()

    # Setter: record the model's current (w, b) and loss for later plots
    def set_para_loss(self, model, loss):
        self.n = self.n + 1
        self.LOSS.append(loss)
        self.W.append(list(model.parameters())[0].item())
        self.B.append(list(model.parameters())[1].item())

    # Plot diagram: final 3D wireframe and contour with visited (w, b) points
    def final_plot(self):
        ax = plt.axes(projection='3d')
        ax.plot_wireframe(self.w, self.b, self.Z)
        ax.scatter(self.W, self.B, self.LOSS, c='r', marker='x', s=200, alpha=1)
        plt.figure()
        plt.contour(self.w, self.b, self.Z)
        plt.scatter(self.W, self.B, c='r', marker='x')
        plt.xlabel('w')
        plt.ylabel('b')
        plt.show()

    # Plot diagram: side-by-side data-space fit and loss-contour trace
    def plot_ps(self):
        plt.subplot(121)
        plt.ylim()
        plt.plot(self.x, self.y, 'ro', label="training points")
        plt.plot(self.x, self.W[-1] * self.x + self.B[-1], label="estimated line")
        plt.xlabel('x')
        plt.ylabel('y')
        plt.ylim((-10, 15))
        plt.title('Data Space Iteration: ' + str(self.n))
        plt.show()
        plt.subplot(122)
        plt.contour(self.w, self.b, self.Z)
        plt.scatter(self.W, self.B, c='r', marker='x')
        plt.title('Loss Surface Contour Iteration' + str(self.n))
        plt.xlabel('w')
        plt.ylabel('b')
# Import libraries and set random seed
import torch
from torch.utils.data import Dataset, DataLoader
torch.manual_seed(1)
# Create Data Class
# Synthetic regression dataset: points on the line f(x) = x - 1 over
# [-3, 3) with Gaussian noise added to the targets.
class Data(Dataset):
    """PyTorch Dataset of noisy samples from the line y = x - 1."""

    # Constructor: build inputs, the noise-free line, and noisy targets.
    def __init__(self):
        inputs = torch.arange(-3, 3, 0.1).view(-1, 1)
        line = 1 * inputs - 1
        self.x = inputs
        self.f = line
        self.y = line + 0.1 * torch.randn(inputs.size())
        self.len = inputs.shape[0]

    # Getter: return the (input, target) pair at the given index.
    def __getitem__(self, index):
        sample = (self.x[index], self.y[index])
        return sample

    # Get Length: number of samples in the dataset.
    def __len__(self):
        return self.len
# Create dataset object
dataset = Data()
# Plot the data
plt.plot(dataset.x.numpy(), dataset.y.numpy(), 'rx', label = 'y')
plt.plot(dataset.x.numpy(), dataset.f.numpy(), label = 'f')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
# Create a linear regression model class
from torch import nn, optim
# One-layer linear model: yhat = W x + b, wrapped as an nn.Module.
class linear_regression(nn.Module):
    """Linear regression expressed as a single ``nn.Linear`` layer."""

    # Constructor
    def __init__(self, input_size, output_size):
        super(linear_regression, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    # Prediction: apply the linear layer to the input batch.
    def forward(self, x):
        return self.linear(x)
# Build in cost function
criterion = nn.MSELoss()
# Create optimizer
model = linear_regression(1,1)
optimizer = optim.SGD(model.parameters(), lr = 0.01)
# Create Dataloader object
trainloader = DataLoader(dataset = dataset, batch_size = 1)
# Customize the weight and bias
model.state_dict()['linear.weight'][0] = -15
model.state_dict()['linear.bias'][0] = -10
# Create plot surface object
get_surface = plot_error_surfaces(15, 13, dataset.x, dataset.y, 30, go = False)
# Train Model
# Train the model with gradient descent using the notebook globals
# `trainloader`, `model`, `criterion`, `optimizer`, and `get_surface`.
def train_model_BGD(iter):
    # iter: number of epochs to run (note: shadows the builtin `iter`).
    for epoch in range(iter):
        for x, y in trainloader:
            yhat = model(x)                  # forward pass
            loss = criterion(yhat, y)        # MSE cost for this batch
            # record the current (w, b) and loss for the visualization
            get_surface.set_para_loss(model, loss.tolist())
            optimizer.zero_grad()            # clear accumulated gradients
            loss.backward()                  # backpropagate
            optimizer.step()                 # parameter update
        # NOTE(review): assumed to run once per epoch -- the original
        # notebook indentation was lost in this export; confirm.
        get_surface.plot_ps()
train_model_BGD(10)
# Practice: Train the model via BGD using optimizer
model = linear_regression(1,1)
model.state_dict()['linear.weight'][0] = -15
model.state_dict()['linear.bias'][0] = -10
get_surface = plot_error_surfaces(15, 13, dataset.x, dataset.y, 30, go = False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load somatosensory MEG data
Step2: Run iterative reweighted multidict TF-MxNE solver
Step3: Generate stc from dipoles
Step4: Show the evoked response and the residual for gradiometers
|
<ASSISTANT_TASK:>
Python Code:
# Author: Mathurin Massias <mathurin.massias@gmail.com>
# Yousra Bekhti <yousra.bekhti@gmail.com>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import os.path as op
import mne
from mne.datasets import somato
from mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles
from mne.viz import plot_sparse_source_estimates
print(__doc__)
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
fwd_fname = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
'sub-{}_task-{}-fwd.fif'.format(subject, task))
# Read evoked
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg=True, eog=True, stim=True)
events = mne.find_events(raw, stim_channel='STI 014')
reject = dict(grad=4000e-13, eog=350e-6)
event_id, tmin, tmax = dict(unknown=1), -0.5, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=reject,
baseline=(None, 0))
evoked = epochs.average()
evoked.crop(tmin=0.0, tmax=0.2)
# Compute noise covariance matrix
cov = mne.compute_covariance(epochs, rank='info', tmax=0.)
del epochs, raw
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
alpha, l1_ratio = 20, 0.05
loose, depth = 0.9, 1.
# Use a multiscale time-frequency dictionary
wsize, tstep = [4, 16], [2, 4]
n_tfmxne_iter = 10
# Compute TF-MxNE inverse solution with dipole output
dipoles, residual = tf_mixed_norm(
evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio,
n_tfmxne_iter=n_tfmxne_iter, loose=loose,
depth=depth, tol=1e-3,
wsize=wsize, tstep=tstep, return_as_dipoles=True,
return_residual=True)
stc = make_stc_from_dipoles(dipoles, forward['src'])
plot_sparse_source_estimates(
forward['src'], stc, bgcolor=(1, 1, 1), opacity=0.1,
fig_name=f"irTF-MxNE (cond {evoked.comment})")
ylim = dict(grad=[-300, 300])
evoked.copy().pick_types(meg='grad').plot(
titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim)
residual.copy().pick_types(meg='grad').plot(
titles=dict(grad='Residuals: Gradiometers'), ylim=ylim)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DictReader returns a "generator" -- which means that we only have one chance to read the returned row dictionaries.
Step2: Since the data is tabular format, pandas is ideally suited for such data. There are convenient pandas import functions for reading in tabular data.
Step3: As we briefly discussed last week, simply reading in without any configuration generates a fairly messy data frame. We should try to specify some helping hints to pandas as to where the header rows are and which is the index column
Step4: 2. Inputs
Step5: Let's take a look at the excel file that want to read into Jupyter
Step6: Here is how we can read the Excel file into the Jupyter environment.
Step7: What are the "sheets" in this workbook?
Step8: We will focus on the sheet 'Data'
Step9: For the sheet "Data", let's print out the content cell-by-cell to view the content.
Step10: Pandas also provides direct Excel data ingest
Step11: Here is another example with multiple sheets
Step12: This dataframe has a "multi-level" index
Step13: How do we export a dataframe back to Excel?
Step14: 3. Inputs
Step15: PDF is a proprietary file format with specific tagging that has been reverse engineered. Let's take a look at some structures in this file.
Step16: There is a table with structured data that we can peel out
Step17: 4. Configurations
Step18: 5. APIs
Step19: Create an API endpoint
Step20: Try REST-ful API call to Twitter
Step21: For streaming API call, we should run a standalone python program
Step22: 6. Python requests
Step23: Now, let's try to get a webpage. For this example, let's get GitHub's public timeline
Step24: Now, we have a Response object called r. We can get all the information we need from this object.
Step25: What about the other HTTP request types
Step26: Passing Parameters In URLs
Step27: You can see that the URL has been correctly encoded by printing the URL
Step28: Note that any dictionary key whose value is None will not be added to the URL's query string.
Step29: Response Content
Step30: Requests will automatically decode content from the server. Most unicode charsets are seamlessly decoded.
Step31: If you change the encoding, Requests will use the new value of r.encoding whenever you call r.text. You might want to do this in any situation where you can apply special logic to work out what the encoding of the content will be. For example, HTTP and XML have the ability to specify their encoding in their body. In situations like this, you should use r.content to find the encoding, and then set r.encoding. This will let you use r.text with the correct encoding.
Step32: In case the JSON decoding fails, r.json raises an exception. For example, if the response gets a 204 (No Content), or if the response contains invalid JSON, attempting r.json raises ValueError
Step33: Custom Headers
Step34: Note
Step35: The dictionary is special, though
Step36: Cookies
Step37: To send your own cookies to the server, you can use the cookies parameter
Step38: Redirection and History
Step39: If you're using GET, OPTIONS, POST, PUT, PATCH or DELETE, you can disable redirection handling with the allow_redirects parameter
Step40: If you're using HEAD, you can enable redirection as well
Step41: Timeouts
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import csv
my_reader = csv.DictReader(open('data/eu_revolving_loans.csv', 'r'))
for line in my_reader:
print(line)
import pandas as pd
df = pd.read_csv('data/eu_revolving_loans.csv')
df.head()
df = pd.read_csv('data/eu_revolving_loans.csv', header=[1,2,4], index_col=0)
df.head()
from __future__ import print_function
from openpyxl import load_workbook
!open 'data/climate_change_download_0.xlsx'
wb = load_workbook(filename='data/climate_change_download_0.xlsx')
# List the worksheet names in the workbook.
# (Removed a stray trailing backtick that made this cell a SyntaxError.)
wb.get_sheet_names()
ws = wb.get_sheet_by_name('Data')
for row in ws.rows:
for cell in row:
print(cell.value)
import pandas as pd
df = pd.read_excel('data/climate_change_download_0.xlsx')
df.head()
df = pd.read_excel('data/GHE_DALY_Global_2000_2012.xls', sheetname='Global2012', header=[4,5])
df.columns
df.to_excel('data/my_excel.xlsx')
!open 'data/my_excel.xlsx'
import pdftables
my_pdf = open('data/WEF_GlobalCompetitivenessReport_2014-15.pdf', 'rb')
chart_page = pdftables.get_pdf_page(my_pdf, 29)
table = pdftables.page_to_tables(chart_page)
titles = zip(table[0][0], table[0][1])[:5]
titles = [''.join([title[0], title[1]]) for title in titles]
print(titles)
all_rows = []
for row_data in table[0][2:]:
all_rows.extend([row_data[:5], row_data[5:]])
print(all_rows)
from ConfigParser import ConfigParser
config = ConfigParser()
config.read('../cfg/sample.cfg')
config.sections()
import tweepy
auth = tweepy.OAuthHandler(config.get('twitter', 'consumer_key'), config.get('twitter', 'consumer_secret'))
auth.set_access_token(config.get('twitter','access_token'), config.get('twitter','access_token_secret'))
auth
api = tweepy.API(auth)
python_tweets = api.search('turkey')
for tweet in python_tweets:
print(tweet.text)
from pprint import pprint
import requests
weather_key = config.get('openweathermap', 'api_key')
res = requests.get("http://api.openweathermap.org/data/2.5/weather",
params={"q": "San Francisco", "appid": weather_key, "units": "metric"})
pprint(res.json())
import requests
r = requests.get('https://api.github.com/events')
r = requests.post('http://httpbin.org/post', data = {'key':'value'})
r = requests.put('http://httpbin.org/put', data = {'key':'value'})
r = requests.delete('http://httpbin.org/delete')
r = requests.head('http://httpbin.org/get')
r = requests.options('http://httpbin.org/get')
payload = {'key1': 'value1', 'key2': 'value2'}
r = requests.get('http://httpbin.org/get', params=payload)
print(r.url)
payload = {'key1': 'value1', 'key2': ['value2', 'value3']}
r = requests.get('http://httpbin.org/get', params=payload)
print(r.url)
import requests
r = requests.get('https://api.github.com/events')
r.text
r.encoding
r.encoding = 'ISO-8859-1'
import requests
r = requests.get('https://api.github.com/events')
r.json()
r.status_code
url = 'https://api.github.com/some/endpoint'
headers = {'user-agent': 'my-app/0.0.1'}
r = requests.get(url, headers=headers)
r.headers
r.headers['Content-Type']
r.headers.get('content-type')
url = 'http://www.cnn.com'
r = requests.get(url)
print(r.cookies.items())
url = 'http://httpbin.org/cookies'
cookies = dict(cookies_are='working')
r = requests.get(url, cookies=cookies)
r.text
r = requests.get('http://github.com')
r.url
r.status_code
r.history
r = requests.get('http://github.com', allow_redirects=False)
r.status_code
r.history
r = requests.head('http://github.com', allow_redirects=True)
r.url
r.history
requests.get('http://github.com', timeout=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DOMANDA
Step2: NOTA
Step3: Il metodo di Newton
Step4: DOMANDA
|
<ASSISTANT_TASK:>
Python Code:
def Enumerate(y, x):
    """Search downward from y for an integer square root of x.

    Tries the candidates y, y-1, ..., stopping at 0, and returns the
    first one whose square equals x, or -1 if none is found.
    """
    candidate = y
    while candidate != 0:
        if candidate * candidate == x:
            return candidate
        candidate = candidate - 1
    return -1
print(Enumerate(16, 16))
print(Enumerate(15, 15))
1/10+1/10+1/10 == 3/10
def Abs(x):
    """Absolute value of x."""
    return -x if x < 0 else x

def Istess(a, b):
    """True when a and b differ by less than the tolerance 0.0001."""
    return Abs(a - b) < 0.0001

def SqrtReals(x, a, b):
    """Approximate sqrt(x) by bisection on the interval [a, b].

    Prints the current (x, a, b) triple at every step, like the original
    teaching version, and stops once the midpoint squared is within the
    Istess tolerance of x.
    """
    print(x, a, b)
    mid = (a + b) / 2
    if Istess(x, mid * mid):
        return mid
    if mid * mid > x:
        return SqrtReals(x, a, mid)
    return SqrtReals(x, mid, b)
print(SqrtReals(36, 0, 36))
def Newton(x, y):
    """Approximate sqrt(x) by Newton's method starting from guess y.

    Repeats the update y <- (y + x/y) / 2 until y*y is within the
    Istess tolerance of x.
    """
    if Istess(x, y * y):
        return y
    improved = (y + x / y) / 2
    return Newton(x, improved)
print(Newton(2, 1))
# TODO: to be completed (exercise left for the reader)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pandas histogram
Step2: Let's try with seaborn's distplot.
Step3: Ah yes. We have to do some data munging before we can use it, removing the NaN (not a number or null) values. That should also be taken into consideration when talking about performance, if you have to do extra steps.
Step4: A little slower, nicer looking, but, again, not as informative as we'd like. Let's see how stem_graphic does.
Step5: We can see a lot of detail. The extremes ARE extreme ($5.2M). I can also see the trend for non managerial, managerial, upper management and beyond (350K). And I can look at this for much large sets of data.
Step6: From the above results, we see that the C written optimized word count (wc) application took close to 17 seconds to count words in the file. wc is used as a reference low watermark. It is expected to be faster than anything in Python loading that same document.
Step7: loading the data (29s) and displaying it (less than 1.5s) took a little over 30 seconds. Less than 31s at any rate is quite acceptable for 12 million rows.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import seaborn as sns
from stemgraphic import stem_graphic
texas = pd.read_csv('salaries.csv')
texas.describe(include='all')
%time ax = texas.Annual_salary.hist()
%time g = sns.distplot(texas.Annual_salary)
%time g = sns.distplot(texas.Annual_salary.dropna())
%time fig, ax = stem_graphic(texas.Annual_salary, display=500, random_state=1235)
!head yellow_tripdata_2015-01.csv
%time !wc yellow_tripdata_2015-01.csv
%time df1 = pd.read_csv('yellow_tripdata_2015-01.csv')
df1.head()
%%time
fig, ax = stem_graphic(df1.total_amount, display=500);
# you need to have the dask module installed to run this part
import dask.multiprocessing
dask.set_options(get=dask.multiprocessing.get)
import dask.dataframe as dd
%%time
df = dd.read_csv('yellow_tripdata_2015-01.csv')
stem_graphic(df.total_amount, display=500)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What can we specify?
Step2: Since we only specified the where, the NWIS will assume the following elements
Step3: Here's what the data look like in table form
Step4: Different ways to specify which site you want
Step5: Okay, maybe I shouldn't have typed 'xx' for our service.
Step6: Getting help
|
<ASSISTANT_TASK:>
Python Code:
# First, import hydrofunctions.
import hydrofunctions as hf
minimum_request = hf.NWIS('01585200')
minimum_request
minimum_request.df()
# For example, let's mistpye one of our parameters that worked so well above:
notSoGoodNWIS = hf.NWIS('01585200', 'xx', period='P200D')
# Let's ask for the impossible: the start date is AFTER the end date:
badRequest = hf.get_nwis('01585200', 'dv', '2017-12-31', '2017-01-01')
# Use the help() function to see all of the parameters for a function, their default values,
# and a short explanation of what it all means. Or you can type ?hf.NWIS to access the same information.
help(hf.NWIS)
# Use the dir() function to see what sort of methods you have available to you,
# or type hf.NWIS.<TAB> to see the same list.
dir(hf.NWIS)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: No-Go Areas
Step2: Wissensinseln
Step3: Wissensanteile berechnen
Step4: Maximales Wissen pro Datei identifizieren
Step5: Visualisierung erstellen
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
log = pd.read_csv("../../../software-data/projects/linux/linux_blame_log.csv.gz")
log.head()
log.info()
top10 = log['author'].value_counts().head(10)
top10
%matplotlib inline
top10.plot.pie();
log['timestamp'] = pd.to_datetime(log['timestamp'])
log.head()
log['age'] = pd.Timestamp("today") - log['timestamp']
log.head()
log['component'] = log['path'].str.split("/").str[:2].str.join(":")
log.head()
age_per_component = log.groupby('component')['age'].min().sort_values()
age_per_component.head()
age_per_component.plot.bar(
title="Alter pro Komponente (in Jahren)",
figsize=[15,5]);
knowledge = log.groupby(
['path', 'author']).agg(
{'timestamp':'min', 'line':'count'}
)
knowledge.head()
knowledge['all'] = knowledge.groupby('path')['line'].transform('sum')
knowledge['knowing'] = knowledge['line'] / knowledge['all']
knowledge.head()
max_knowledge_per_file = knowledge.groupby(['path'])['knowing'].transform(max)
knowledge_carriers = knowledge[knowledge['knowing'] == max_knowledge_per_file]
knowledge_carriers = knowledge_carriers.reset_index(level=1)
knowledge_carriers.head()
from ausi import d3
d3.create_json_for_zoomable_circle_packing(
knowledge_carriers.reset_index(),
'author',
'author',
'path',
'/',
'all',
'knowing',
'linux_circle_packing'
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Like last week, we're going to use pyspark, a Python package that wraps Apache Spark and makes its functionality available in Python. We'll also use a few of the standard Python libraries - json, socket, threading and time - as well as the sseclient package you just installed to connect to the event stream.
Step2: Streaming Wikipedia events
Step3: Streaming analysis
Step4: Next, we create a StreamingContext object, which represents the streaming functionality of our Spark cluster. When we create the context, we must specify a batch duration time (in seconds), to tell Spark how often it should process data from the stream. Let's process the Wikipedia data in batches of one second
Step5: Using our StreamingContext object, we can create a data stream from our local TCP relay socket with the socketTextStream method
Step6: Even though we've created a data stream, nothing happens! Before Spark starts to consume the stream, we must first define one or more operations to perform on it. Let's count the number of edits made by different users in the last minute
Step7: Again, nothing happens! This is because the StreamingContext must be started before the stream is processed by Spark. We can start data streaming using the start method of the StreamingContext and stop it using the stop method. Let's run the stream for two minutes (120 seconds) and then stop
|
<ASSISTANT_TASK:>
Python Code:
!pip install sseclient
import json
import pyspark
import socket
import threading
import time
from pyspark.streaming import StreamingContext
from sseclient import SSEClient
def relay():
    """Forward Wikimedia recent-change SSE events to a local TCP socket.

    Serves one client at a time on localhost:50000 so that Spark's
    socketTextStream can consume the event feed.

    Fixes vs. original: `client` is initialised before the try block so the
    finally clause cannot raise UnboundLocalError when accept() fails, and
    the bare `except:` is narrowed to `except Exception` so KeyboardInterrupt
    can still stop the thread.
    """
    events = SSEClient('https://stream.wikimedia.org/v2/stream/recentchange')
    s = socket.socket()
    s.bind(('localhost', 50000))
    s.listen(1)
    while True:
        client = None  # so the finally clause is safe if accept() raises
        try:
            client, address = s.accept()
            for event in events:
                if event.event == 'message':
                    # NOTE(review): on Python 3, sendall expects bytes;
                    # event.data may need .encode('utf-8') -- confirm runtime.
                    client.sendall(event.data)
                    break
        except Exception:
            # Best-effort relay: drop the client on any error and re-accept.
            pass
        finally:
            if client is not None:
                client.close()
threading.Thread(target=relay).start()
# One local Spark context using every available core.
sc = pyspark.SparkContext(master='local[*]')
# Process the stream in one-second micro-batches.
ssc = StreamingContext(sc, 1)
# Consume the text relayed to localhost:50000 by relay() above.
stream = ssc.socketTextStream('localhost', 50000)
# NOTE(review): pprint() returns None, so `users` is bound to None; the
# pipeline exists for its side effect of printing each windowed batch.
users = (
    stream.map(json.loads) # Parse the stream data as JSON
    .map(lambda obj: obj['user']) # Extract the values corresponding to the 'user' key
    .map(lambda user: (user, 1)) # Give each user a count of one
    .window(60) # Create a sliding window, sixty seconds in length
    .reduceByKey(lambda a, b: a + b) # Reduce all key-value pairs in the window by adding values
    .transform( # Sort by the largest count
        lambda rdd: rdd.sortBy(lambda kv: kv[1], ascending=False))
    .pprint() # Print the results
)
# Run the streaming job for two minutes, then stop it.
ssc.start()
time.sleep(120)
ssc.stop()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we shall parse all the data from the ICSD ternary file and try and map all the space groups to their space-group numbers, after doing some cleaning up of the data like getting rid of specifiers for RHOMBOHEDRAL/Hexagonal axes etc.
Step2: We shall check if our mapping to space group numbers is exact and then plot the frequency distribution of the different space groups.
Step3: We see that all the lines have been mapped successfully and that a few spacegroups are not present at all while others have as many as ~6000 entries.
Step4: How are the counts distributed among the space groups? Turns out it's not quite exponentially decaying.
|
<ASSISTANT_TASK:>
Python Code:
with open("ICSD/spacegroups.dat",'r') as f:
dat=csv.reader(f,dialect='excel-tab',quoting=csv.QUOTE_NONE)
list=[element.strip() for row in dat for element in row ]
list1=[[int(list[i*2]),list[i*2+1]] for i in range(len(list)/2)]
dict_space={}
for i in range(len(list1)):
dict_space[list1[i][1]]=list1[i][0]
with open('ICSD/spacegroups_2.dat','r') as f1:
f=f1.readlines()
for line in f:
data=[element.strip() for element in line.split()]
if data[1] not in dict_space.keys():
dict_space[data[1]]=int(data[0])
with open('ICSD/spacegroups_3.dat','r') as f1:
f=f1.readlines()
for line in f:
data=[element.strip() for element in line.split()]
if data[0] not in dict_space.keys():
dict_space[data[0]]=int(data[1])
# Parse the ICSD ternaries file; column 1 holds the space-group symbol.
with open('ICSD/icsd-ternaries.csv','r') as f:
    data=csv.reader(f,"excel-tab")
    list_data=[[element.strip() for element in row] for row in data]
# Drop embedded spaces so symbols match the dict_space keys.
for row in list_data:
    row[1]=row[1].replace(' ','')
# Strip axis/setting suffixes (Z, S, H for hexagonal, R for rhombohedral).
list_space=[row[1].rstrip('Z').rstrip('S').rstrip("H").rstrip('R') for row in list_data]
list_nf=[]
# One counter per space group (230 of them).
# NOTE(review): plt.array implies a pylab-style import; plain
# matplotlib.pyplot has no `array` -- confirm how plt was imported.
count_f=plt.array([0]*230)
count_not=0
for s in list_space:
    if s in dict_space.keys():
        #print "Found Element in dictionary for space_group_name {0}, with space_group number {1}".format(s,dict_space[s])
        count_f[dict_space[s]-1]+=1
    else:
        #print "Entry not found for space group name ",s
        list_nf.append(s)
# Python 2 print statements: this notebook targets Python 2.
print "Found Entries={0}, Not Found Entries={1}".format(sum(count_f),len(list_space)-sum(count_f))
print "Found No Entries for these spacegroups",plt.array(plt.where(count_f==0))+1
plt.plot(plt.arange(230),count_f,'bo-')
sg_counts = sorted(enumerate(count_f,1), key = lambda x: x[1], reverse = True)
print(" SG Count")
print("--- -----")
for i in range(20):
sg,count = sg_counts[i]
print("{:3} {:4}".format(sg, count))
plt.semilogy(range(len(sg_counts)), [e[1] for e in sg_counts], "o-")
for a in [27,48,89,93,153,170,171,172,179,184,192,207,211]:
print a,count_f[a-1]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Conditional Probability
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib_venn import venn2
venn2(subsets = (0.45, 0.15, 0.05), set_labels = ('A', 'B'))
import pandas as pd
df = pd.DataFrame([[6,1,3,'Fradulent'],[14,29,47,'Not Fradulent']],
columns=['Fire', 'Auto','Other','Status'])
df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A short truss tutorial is available here
Step2: Detailed results at the nodes
Step3: Detailed results on the bars
Step4: Dead (or structural) mass
Step5: Questions
Step6: Question 2
Step8: Question 2
Step10: Question 3
Step12: Question 4
Step14: Question 5
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib nbagg
import sys, copy, os
from scipy import optimize
sys.path.append("truss-master")
try:
import truss
print("Truss is correctly installed")
except:
print("Truss is NOT correctly installed !")
E = 210.e9 # Young Modulus [Pa]
rho = 7800. # Density [kg/m**3]
A = 5.e-2 # Cross section [m**2]
sigmay = 400.e6 # Yield Stress [Pa]
# Model definition
model = truss.core.Model() # Model definition
# NODES
nA = model.add_node((0.,0.), label = "A")
nC = model.add_node((3.,0.), label = "C")
nD = model.add_node((3.,3.), label = "D")
nE = model.add_node((6.,0.), label = "E")
nF = model.add_node((6.,3.), label = "F")
nG = model.add_node((9.,0.), label = "G")
nH = model.add_node((9.,3.), label = "H")
# BOUNDARY CONDITIONS
nA.block[1] = True
nG.block[0] = True
nH.block[0] = True
# BARS
AC = model.add_bar(nA, nC, modulus = E, density = rho, section = A, yield_stress = sigmay)
CD = model.add_bar(nC, nD, modulus = E, density = rho, section = A, yield_stress = sigmay)
AD = model.add_bar(nA, nD, modulus = E, density = rho, section = A, yield_stress = sigmay)
CE = model.add_bar(nC, nE, modulus = E, density = rho, section = A, yield_stress = sigmay)
DF = model.add_bar(nD, nF, modulus = E, density = rho, section = A, yield_stress = sigmay)
DE = model.add_bar(nD, nE, modulus = E, density = rho, section = A, yield_stress = sigmay)
EF = model.add_bar(nE, nF, modulus = E, density = rho, section = A, yield_stress = sigmay)
EG = model.add_bar(nE, nG, modulus = E, density = rho, section = A, yield_stress = sigmay)
FH = model.add_bar(nF, nH, modulus = E, density = rho, section = A, yield_stress = sigmay)
FG = model.add_bar(nF, nG, modulus = E, density = rho, section = A, yield_stress = sigmay)
GH = model.add_bar(nG, nH, modulus = E, density = rho, section = A, yield_stress = sigmay)
# STRUCTURAL LOADING
nG.force = np.array([0., -1.e6])
model.solve()
xlim, ylim = model.bbox(deformed = False)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_aspect("equal")
#ax.axis("off")
model.draw(ax, deformed = False, field = "stress", label = True, force_scale = 1.e-6, forces = True)
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid()
plt.xlabel("Axe $x$")
plt.ylabel("Axe $y$")
model.data(at = "nodes")
model.data(at = "bars")
m0 = model.mass()
m0 * 1.e-3 # Mass in tons !
# Example:
model.data(at = "bars").state.failure.values
#...
new_section = 9e-3
for bar in model.bars:
bar.section = new_section
model.solve()
model.data(at = "bars").state.failure.values
m1 = model.mass()
m1 *1.e-3 # mass in tons.
print("Reduced mass by {0} kg".format(m0 - m1))
def alpha(X):
    """Cost function for the optimiser: set the vertical positions of nodes
    D, F and H from X, re-solve the truss, and return
    |vertical displacement of G| * total truss mass (to be minimised)."""
    nD.coords[1] = X[0]
    nF.coords[1] = X[1]
    nH.coords[1] = X[2]
    model.solve()
    return abs(nG.displacement[1]) * model.mass()
X0 = [nD.coords[1], nF.coords[1], nH.coords[1]]
optimize.minimize(alpha, X0, method = "nelder-mead")
xlim, ylim = model.bbox(deformed = False)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_aspect("equal")
#ax.axis("off")
model.draw(ax, deformed = False, field = "stress", label = True, force_scale = 1.e-6, forces = True)
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid()
plt.xlabel("Axe $x$")
plt.ylabel("Axe $y$")
model.mass()
def alpha(X):
    """Cost function for the 7-parameter layout search: X holds the
    y-coordinates of D, F, H followed by the x-coordinates of C, D, E, F.
    Returns |vertical displacement of G| * truss mass after re-solving."""
    nD.coords[1] = X[0]
    nF.coords[1] = X[1]
    nH.coords[1] = X[2]
    nC.coords[0] = X[3]
    nD.coords[0] = X[4]
    nE.coords[0] = X[5]
    nF.coords[0] = X[6]
    model.solve()
    return abs(nG.displacement[1]) * model.mass()
X0 = [nD.coords[1], nF.coords[1], nH.coords[1], nC.coords[0], nD.coords[0], nE.coords[0], nF.coords[0],]
sol = optimize.minimize(alpha, X0, method = "nelder-mead", options = {"maxfev": 1000})
xlim, ylim = model.bbox(deformed = False)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(1,1,1)
ax.set_aspect("equal")
#ax.axis("off")
model.draw(ax, deformed = False, field = "stress", label = True, force_scale = 1.e-6, forces = True)
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid()
plt.xlabel("Axe $x$")
plt.ylabel("Axe $y$")
model2 = truss.core.Model() # Model definition
# NODES
nA = model2.add_node((0.,0.), label = "A")
#nC = model2.add_node((3.,0.), label = "C")
#nD = model2.add_node((3.,3.), label = "D")
#nE = model2.add_node((6.,0.), label = "E")
nF = model2.add_node((6.,3.), label = "F")
nG = model2.add_node((9.,0.), label = "G")
nH = model2.add_node((9.,3.), label = "H")
# BOUNDARY CONDITIONS
nA.block[1] = True
nG.block[0] = True
nH.block[0] = True
#G.force = np.array([0., -1.])*1.e4
#C.force[0] = .2
#C.force[1] = -0.
# BARS
AG = model2.add_bar(nA, nG, modulus = E, density = rho, section = A, yield_stress = sigmay)
FG = model2.add_bar(nF, nG, modulus = E, density = rho, section = A, yield_stress = sigmay)
HG = model2.add_bar(nH, nG, modulus = E, density = rho, section = A, yield_stress = sigmay)
AF = model2.add_bar(nA, nF, modulus = E, density = rho, section = A, yield_stress = sigmay)
FH = model2.add_bar(nF, nH, modulus = E, density = rho, section = A, yield_stress = sigmay)
# STRUCTURAL LOADING
nG.force = np.array([0., -1.e6])
def alpha(X):
    """Cost function for the simplified truss (model2): X = (F_y, H_y, F_x).
    Returns |vertical displacement of G| * mass of model2 after re-solving."""
    nF.coords[1] = X[0]
    nH.coords[1] = X[1]
    nF.coords[0] = X[2]
    model2.solve()
    return abs(nG.displacement[1]) * model2.mass()
X0 = [nF.coords[1], nH.coords[1], nF.coords[0]]
optimize.minimize(alpha, X0, method = "nelder-mead")
xlim, ylim = model2.bbox(deformed = False)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_aspect("equal")
#ax.axis("off")
model2.draw(ax, deformed = False, field = "stress", label = True, force_scale = 1.e-6, forces = True)
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid()
plt.xlabel("Axe $x$")
plt.ylabel("Axe $y$")
model2.data("bars")
def beta(X):
    """Cost function for section sizing: assign X[i] as the cross-section of
    bar i, re-solve, and return the sum of squared gaps between each bar's
    |stress| and the yield stress (drives every bar toward full utilisation)."""
    # zip pairs bars with their new sections; equivalent to the index loop
    # when len(X) == len(model2.bars), which the optimiser guarantees.
    for bar, section in zip(model2.bars, X):
        bar.section = section
    model2.solve()
    return ((abs(model2.data("bars").state.stress.values) - sigmay)**2).sum()
X0 = [bar.section for bar in model2.bars]
sol = optimize.minimize(beta, X0, method = "nelder-mead", options = {"maxfev": 10000, "maxiter":10000})
print(sol)
xlim, ylim = model2.bbox(deformed = False)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_aspect("equal")
#ax.axis("off")
model2.draw(ax, deformed = False, field = "stress", label = True, force_scale = 1.e-6, forces = True)
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid()
plt.xlabel("Axe $x$")
plt.ylabel("Axe $y$")
model2.data("bars")
model2.mass()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Rapidgram
Step3: Question 1
Step5: Question 2
Step7: Question 3
Step9: Question 4
Step11: Question 5
Step13: Do you think this query will work as intended? Why or why not? Try designing a better query below
Step15: Question 6
Step17: Using the generate_series view, get a sample of ten students, weighted in this manner.
Step19: Question 7
|
<ASSISTANT_TASK:>
Python Code:
!pip install ipython-sql
%load_ext sql
%sql sqlite:///./lab06.sqlite
import sqlalchemy
engine = sqlalchemy.create_engine("sqlite:///lab06.sqlite")
connection = engine.connect()
!pip install -U okpy
from client.api.notebook import Notebook
ok = Notebook('lab06.ok')
%%sql
DROP TABLE IF EXISTS users;
DROP TABLE IF EXISTS follows;
CREATE TABLE users (
USERID INT NOT NULL,
NAME VARCHAR (256) NOT NULL,
YEAR FLOAT NOT NULL,
PRIMARY KEY (USERID)
);
CREATE TABLE follows (
USERID INT NOT NULL,
FOLLOWID INT NOT NULL,
PRIMARY KEY (USERID, FOLLOWID)
);
%%capture
count = 0
users = ["Ian", "Daniel", "Sarah", "Kelly", "Sam", "Alison", "Henry", "Joey", "Mark", "Joyce", "Natalie", "John"]
years = [1, 3, 4, 3, 4, 2, 5, 2, 1, 3, 4, 2]
for username, year in zip(users, years):
count += 1
%sql INSERT INTO users VALUES ($count, '$username', $year);
%%capture
follows = [0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1,
0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1,
0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1,
0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0,
0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0,
1, 1, 0, 1]
for i in range(12):
for j in range(12):
if i != j and follows[i + j*12]:
%sql INSERT INTO follows VALUES ($i+1, $j+1);
q1 =
...
%sql $q1
q1_answer = connection.execute(q1).fetchall()
_ = ok.grade('q1')
_ = ok.backup()
q2 =
...
%sql $q2
q2_answer = connection.execute(q2).fetchall()
_ = ok.grade('q2')
_ = ok.backup()
q2_answer
q3 =
...
%sql $q3
q3_answer = connection.execute(q3).fetchall()
_ = ok.grade('q3')
_ = ok.backup()
q4 =
...
%sql $q4
q4_answer = connection.execute(q4).fetchall()
_ = ok.grade('q4')
_ = ok.backup()
q5a =
SELECT u1.name as follower, u2.name as followee
FROM follows, users as u1, users as u2
WHERE follows.userid=u1.userid
AND follows.followid=u2.userid
AND RANDOM() < 0.33
q5b =
...
%sql $q5b
q5_answers = [connection.execute(q5b).fetchall() for _ in range(100)]
_ = ok.grade('q5')
_ = ok.backup()
q6a =
WITH RECURSIVE generate_series(value) AS (
SELECT 0
UNION ALL
SELECT value+1 FROM generate_series
WHERE value+1<=10
)
SELECT value
FROM generate_series
%sql $q6a
q6b =
WITH RECURSIVE generate_series(value) AS (
SELECT 0
UNION ALL
SELECT value+1 FROM generate_series
WHERE value+1<=12
)
SELECT name
FROM ...
WHERE ...
ORDER BY ...
LIMIT 10
%sql $q6b
q6_answers = [connection.execute(q6b).fetchall() for _ in range(100)]
_ = ok.grade('q6')
_ = ok.backup()
q7 =
SELECT name FROM (
SELECT ...
)
WHERE year > avg_follower_years
%sql $q7
q7_answer = connection.execute(q7).fetchall()
_ = ok.grade('q7')
_ = ok.backup()
_ = ok.grade_all()
_ = ok.submit()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Давайте рассмотрим всех пользователей из контрольной группы (treatment = 1). Для таких пользователей мы хотим проверить гипотезу о том, что штат абонента не влияет на то, перестанет ли абонент пользоваться услугами оператора.
Step2: В основе критерия хи-квадрат лежит предположение о том, что если верна нулевая гипотеза, то дискретное биномиальное распределение данных по клеткам в таблице сопряженности может быть аппроксимировано с помощью непрерывного распределения хи-квадрат. Однако точность такой аппроксимации существенно зависит от суммарного количества наблюдений и их распределения в этой таблице (отсюда и ограничения при использовании критерия хи-квадрат).
Step3: Что если у нас мало данных, мы не хотим использовать аппроксимацию дискретного распределения непрерывным и использовать сомнительную поправку, предположения критерия хи-квадрат не выполняются, а проверить гипотезу о том, что данные принадлежат одному распределению, нужно?
Step4: Давайте попробуем применить полученные знания о разных видах корреляции и ее применимости на практике.
Step5: Еще раз рассмотрим пару признаков day_calls и mes_estim. Посчитайте корреляцию Спирмена между этими признаками на всех данных, ее значимость.
Step6: Посчитайте значение коэффицента корреляции Крамера между двумя признаками
Step7: Вы прослушали большой курс и к текущему моменту обладете достаточными знаниями, чтобы попытаться самостоятельно выбрать нужный метод / инструмент / статистический критерий и сделать правильное заключение.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import numpy as np
import pandas as pd
import scipy as sc
from scipy import stats
from statsmodels.stats.proportion import proportion_confint
from statsmodels.sandbox.stats.multicomp import multipletests
from itertools import combinations
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
#reading data
churn = pd.read_csv('churn_analysis.csv')
churn.info()
pd.set_option('display.max_columns', None)
churn.columns
churn.drop('Unnamed: 0', axis=1, inplace=True)
churn.head()
churn.churn.loc[churn.churn == 'True.'] = True
churn.churn.loc[churn.churn == 'False.'] = False
churn.head()
pt_states = pd.pivot_table(churn.loc[churn.treatment == 1], values='treatment', index='state', columns='churn',
aggfunc=len, fill_value=0)
pt_states
pt_states.index
chi2_val = pd.DataFrame(columns=['state 1', 'state 2', 'chi2_stat', 'p-value'])
i = 0
for st1, st2 in combinations(pt_states.index, 2):
chi2_table = pt_states.loc[[st1, st2],:]
chi2_val.loc[i, 'state 1'] = st1
chi2_val.loc[i, 'state 2'] = st2
chi2_stat = stats.chi2_contingency(chi2_table.values, correction=False)
chi2_val.loc[i, 'chi2_stat'] = chi2_stat[0]
chi2_val.loc[i, 'p-value'] = chi2_stat[1]
i += 1
pd.set_option('display.max_rows', None)
chi2_val
sign_lvls_num = len(chi2_val.loc[chi2_val['p-value'] <= 0.05, :])
print('Num of significance lvls below 0.05: %d' % sign_lvls_num)
chi2_val = pd.DataFrame(columns=['state 1', 'state 2', 'chi2_stat', 'p-value'])
i = 0
for st1, st2 in combinations(pt_states.index, 2):
chi2_table = pt_states.loc[[st1, st2],:]
chi2_val.loc[i, 'state 1'] = st1
chi2_val.loc[i, 'state 2'] = st2
chi2_stat = stats.chi2_contingency(chi2_table.values, correction=True)
chi2_val.loc[i, 'chi2_stat'] = chi2_stat[0]
chi2_val.loc[i, 'p-value'] = chi2_stat[1]
i += 1
chi2_val
sign_lvls_num = len(chi2_val.loc[chi2_val['p-value'] <= 0.05, :])
print('Num of significance lvls below 0.05: %d' % sign_lvls_num)
fisher_val = pd.DataFrame(columns=['state 1', 'state 2', 'fisher_stat', 'p-value'])
i = 0
for st1, st2 in combinations(pt_states.index, 2):
fisher_table = pt_states.loc[[st1, st2],:]
fisher_val.loc[i, 'state 1'] = st1
fisher_val.loc[i, 'state 2'] = st2
fisher_stat = stats.fisher_exact(fisher_table.values)
fisher_val.loc[i, 'fisher_stat'] = fisher_stat[0]
fisher_val.loc[i, 'p-value'] = fisher_stat[1]
i += 1
fisher_val
print('Pearson correlation: %.4f' % stats.pearsonr(churn.day_calls, churn.mes_estim)[0])
print('P-value: %.4f' % stats.pearsonr(churn.day_calls, churn.mes_estim)[1])
print('Spearman correlation: %.4f' % stats.spearmanr(churn.day_calls, churn.mes_estim)[0])
print('P-value: %.4f' % stats.spearmanr(churn.day_calls, churn.mes_estim)[1])
_ = plt.figure(figsize=(15,6))
_ = plt.subplot(121)
_ = sc.stats.probplot(churn.day_calls, dist="norm", plot=plt)
_ = plt.subplot(122)
_ = sc.stats.probplot(churn.mes_estim, dist="norm", plot=plt)
_ = plt.scatter(churn.day_calls, churn.mes_estim, alpha=0.2)
_ = plt.xlabel('day_calls')
_ = plt.ylabel('mes_estim')
def cramers_stat(confusion_matrix):
    """Cramer's V measure of association for a contingency table."""
    chi2_statistic = stats.chi2_contingency(confusion_matrix)[0]
    total_observations = confusion_matrix.sum()
    reduced_dim = min(confusion_matrix.shape) - 1
    return np.sqrt(chi2_statistic / (total_observations * reduced_dim))
print('V Cramer stat value: %.4f' % cramers_stat(pt_states.values))
print('p-value: %.4f' % stats.chi2_contingency(pt_states.values)[1])
pt_states = pd.pivot_table(churn, values='account_length', index='state', columns=['treatment', 'churn'],
aggfunc=len, fill_value=0)
pt_states
#proportions of churn = 1 for each treatment
props_churn = np.array([pt_states.loc[:, treat].loc[:, True].sum() /
(pt_states.loc[:, treat].loc[:, False].sum() + pt_states.loc[:, treat].loc[:, True].sum())
for treat in range(3)])
for i, prop in enumerate(props_churn):
print('Treatment #%d prop. of churn = 1: %.4f' % (i, prop))
#wilson intervals for proportions
wilson_ints = np.array([proportion_confint(pt_states.loc[:, treat].loc[:, True].sum(),
(pt_states.loc[:, treat].loc[:, False].sum() + pt_states.loc[:, treat].loc[:, True].sum()),
method='wilson') for treat in range(3)])
for i, wils in enumerate(wilson_ints):
print('Treatment #%d prop. of churn = 1: %s' % (i, str(wils)))
def proportions_confint_diff_ind(sample1, sample2, alpha = 0.05):
    """Normal-approximation confidence interval for the difference of two
    independent proportions.

    Each sample is a table with boolean column labels; column True holds the
    per-row success counts and column False the failure counts.
    Returns (lower, upper) at confidence level 1 - alpha.
    """
    z = stats.norm.ppf(1 - alpha / 2.)
    n1 = sample1.loc[:, False].sum() + sample1.loc[:, True].sum()
    n2 = sample2.loc[:, False].sum() + sample2.loc[:, True].sum()
    p1 = sample1.loc[:, True].sum() / n1
    p2 = sample2.loc[:, True].sum() / n2
    diff = p1 - p2
    margin = z * np.sqrt(p1 * (1 - p1) / n1 + p2 * (1 - p2) / n2)
    return (diff - margin, diff + margin)
def proportions_diff_z_stat_ind(sample1, sample2):
    """Z statistic for the difference of two independent proportions,
    using the pooled-proportion standard error.

    Samples follow the same boolean-column layout as
    proportions_confint_diff_ind.
    """
    n1 = sample1.loc[:, False].sum() + sample1.loc[:, True].sum()
    n2 = sample2.loc[:, False].sum() + sample2.loc[:, True].sum()
    p1 = sample1.loc[:, True].sum() / n1
    p2 = sample2.loc[:, True].sum() / n2
    # Pooled success rate under the null hypothesis of equal proportions.
    pooled = (p1 * n1 + p2 * n2) / (n1 + n2)
    std_err = np.sqrt(pooled * (1 - pooled) * (1. / n1 + 1. / n2))
    return (p1 - p2) / std_err
def proportions_diff_z_test(z_stat, alternative = 'two-sided'):
    """p-value for a z statistic under the chosen alternative hypothesis.

    alternative: 'two-sided', 'less' or 'greater'; raises ValueError
    otherwise.
    """
    if alternative == 'two-sided':
        return 2 * (1 - stats.norm.cdf(np.abs(z_stat)))
    elif alternative == 'less':
        return stats.norm.cdf(z_stat)
    elif alternative == 'greater':
        return 1 - stats.norm.cdf(z_stat)
    raise ValueError("alternative not recognized\n"
                     "should be 'two-sided', 'less' or 'greater'")
p_val_table = np.empty((3, 3))
i = 0
for tr1, tr2 in combinations(range(3), 2):
print('Treatments #%d and %d: ' % (tr1, tr2)),
confint_diff = proportions_confint_diff_ind(pt_states.loc[:, tr1], pt_states.loc[:, tr2])
print('[%.4f, %.4f], ' % confint_diff),
p_val = proportions_diff_z_test(proportions_diff_z_stat_ind(pt_states.loc[:, tr1], pt_states.loc[:, tr2]))
p_val_table[i, :] = tr1, tr2, p_val
i += 1
print('p-value: %.4f' % p_val)
p_val_table
_, p_corrected, _, _ = multipletests(p_val_table[:,2], alpha = 0.05, method = 'fdr_bh')
p_corrected
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Rendering our results
Step2:
|
<ASSISTANT_TASK:>
Python Code:
data
y = "b"
x = ["x","y"]
train, valid, test = data.split_frame([0.75, 0.15])
from h2o.estimators import H2ODeepLearningEstimator
m = H2ODeepLearningEstimator(model_id="DL_defaults", hidden=[20,20,20,20,20,20,20,20,20,20], activation='tanh',epochs=10000)
m.train(x,y,train)
m
import numpy as np
import pandas as pd
from h2o.frame import H2OFrame
from PIL import Image
IM_SIZE = 256
IM_CHANNELS = 1
def save_pixels(path_to_image_file, image_array, mode):
    """Write *image_array* to disk as an image in PIL *mode* (e.g. 'L', 'RGB')."""
    im_out = Image.fromarray(image_array, mode )
    im_out.save(path_to_image_file)
# Takes care of clipping, casting to int8, etc.
def save_ndarray(path_to_outfile, x, width = IM_SIZE, height = IM_SIZE, channels = IM_CHANNELS):
out_arr = np.clip(x, 0, 255)
if channels == 3:
out_arr = np.reshape(out_arr, (width, height, channels), 1)
else:
assert(channels == 1)
out_arr = np.reshape(out_arr, (width, height), 1)
out_arr = np.rot90(out_arr, k=3)
out_arr = np.fliplr(out_arr)
if channels == 3:
save_pixels(path_to_outfile, out_arr.astype(np.int8), 'RGB')
else:
save_pixels(path_to_outfile, out_arr.astype(np.int8), 'L')
# Create suitable training matrix
def gen_input_tuples(pixels_width, pixels_height, scale, translate_x, translate_y):
image_height = pixels_height
image_width = pixels_width
# One row per pixel
X = np.zeros((image_width * image_height, 2))
# Fill in y values
X[:,1] = np.repeat(range(0, image_height), image_width, 0)
# Fill in x values
X[:,0] = np.tile(range(0, image_width), image_height)
# Normalize X
X = X - X.mean()
X = X / X.var()
X[:,0] += translate_x
X[:,1] += translate_y
X = X / scale
return (X)
def render(mdl, image_size, scale, tx, ty, outfile):
    """Predict pixel intensities with *mdl* on an image_size x image_size
    grid (translated by tx/ty, scaled by scale) and save the result as a
    grayscale image at *outfile*.

    Bug fix: the original called the notebook-global model ``m`` instead of
    the ``mdl`` parameter, so the argument was silently ignored.
    """
    pixel_coords = gen_input_tuples(image_size, image_size, scale, tx, ty)
    df_pixels_to_render = pd.DataFrame({'x': pixel_coords[:, 0], 'y': pixel_coords[:, 1]})
    h2o_pixels = H2OFrame(df_pixels_to_render)
    pixel_intensities = mdl.predict(h2o_pixels)
    # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
    # .to_numpy() is the modern equivalent -- confirm the pandas version used.
    save_ndarray(outfile, pixel_intensities.as_data_frame().as_matrix(), image_size, image_size, 1)
render(m, IM_SIZE, 1, 0, 0, "modelled_tower_bridge.png")
render(m, 1024, 1/8, 0, 0, "modelled_tower_bridge_x2.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Resources for this
Step2: Get the activity of a list
Step4: Since are going to be computing correlations between N different time series data sets and that's an $O(N^2)$ operation, let's limit N.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from bigbang.archive import Archive
arx = Archive("scipy-user",archive_dir="../archives")
act = arx.get_activity()
cutoff = 20
def filtered_participants(cutoff):
    """Return the columns of the global activity table ``act`` whose summed
    activity exceeds *cutoff*."""
    totals = act.sum()
    return act.columns[totals > cutoff]
filtered_participants(cutoff)[:10]
# NOTE(review): scipy.stats.stats is a deprecated import path;
# scipy.stats exposes pearsonr directly.
from scipy.stats.stats import pearsonr
fc = filtered_participants(cutoff)
n = len(fc)
# Symmetric participant-by-participant Pearson correlation matrix.
pc = np.zeros([n,n])
for i in range(0,n):
    for j in range(i,n):
        # since pearson correlation assumes normally distributed data
        # and we believe activity to be log-normally distributed (see Shalizi...)
        # we'll take the log of values here
        si = np.log1p(act[fc[i]])
        sj = np.log1p(act[fc[j]])
        c = pearsonr(si,sj)[0]
        # Fill both triangles so pc stays symmetric.
        pc[i,j] = c
        pc[j,i] = c
pc
G = nx.Graph(pc)
labels = dict(enumerate(fc))
G = nx.relabel_nodes(G,labels)
plt.imshow(pc)
# from http://sociograph.blogspot.com/2012/11/visualizing-adjacency-matrices-in-python.html
import networkx as nx
from matplotlib import pyplot, patches
def draw_adjacency_matrix(G, node_order=None, partitions=(), colors=(), cmap="Greys"):
    """Plot the adjacency matrix of a graph, optionally outlining partitions.

    - G is a networkx graph
    - node_order (optional) is a list of nodes, where each node in G
      appears exactly once
    - partitions is a list of node lists, where each node in G appears
      in exactly one node list
    - colors is a list of strings indicating what color each partition
      should be; must match partitions in length

    Fixes vs. original: mutable list default arguments replaced by tuples,
    dtype=bool instead of np.bool (alias removed in NumPy 1.24), and
    linewidth passed as a number rather than the string "1".
    """
    adjacency_matrix = nx.to_numpy_matrix(G, dtype=bool, nodelist=node_order)
    # Plot adjacency matrix in toned-down black and white
    fig = pyplot.figure(figsize=(5, 5))  # in inches
    pyplot.imshow(adjacency_matrix,
                  cmap=cmap,
                  interpolation="none")
    # The rest is just if you have sorted nodes by a partition and want to
    # highlight the module boundaries
    assert len(partitions) == len(colors)
    ax = pyplot.gca()
    current_idx = 0
    for partition, color in zip(partitions, colors):
        ax.add_patch(patches.Rectangle((current_idx, current_idx),
                                       len(partition),  # Width
                                       len(partition),  # Height
                                       facecolor="none",
                                       edgecolor=color,
                                       linewidth=1))
        current_idx += len(partition)
# order nodes by number of messages sent by each participant
o = list(act[fc].sum().order(ascending=False).keys())
draw_adjacency_matrix(G,node_order=o)
from sklearn import cluster
n_clusters = 5
sc = cluster.SpectralClustering(n_clusters=n_clusters)
partition = sc.fit_predict(pc)
partition_order = [fc[x] for x in np.argsort(partition)]
np.sort(partition)
from collections import defaultdict
parts = defaultdict(list)
for i in range(len(partition)):
parts[partition[i]].append(fc[i])
colors = [plt.cm.hsv(1. / (k + 1)) for k in parts.keys()]
colors
len(parts.values())
draw_adjacency_matrix(G,
node_order=partition_order,
partitions=parts.values(),
colors=colors)
for k,v in parts.items():
print str(k) + ": " + str(len(v))
node_parts = {}
for k,v in parts.items():
for n in v:
node_parts[n] = int(k)
import networkx as nx
#FG = nx.Graph(pc > .01)
FG = nx.from_numpy_matrix(pc * (pc > 0))
labels = dict(enumerate(fc))
FG = nx.relabel_nodes(FG,labels)
nx.set_node_attributes(FG,'corr-cluster',node_parts)
pos = nx.spring_layout(FG, k = 0.6, iterations = 1000)
nx.draw(FG,pos)
nx.write_gexf(FG,'corr.gexf')
from bigbang import plot
plot.stack(act,partition=parts.values(),smooth=7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Profile selection
Step2: The same outpout can be reported as a pandas dataframe
Step3: Dump to excel
Step4: Our targets are
Step5: The ppl object now has the two profiles available in the data attribute
Step6: while the label attibute stores the variable type
Step7: Ppl data structure
Step8: To plot the last timestep
Step9: The time can also be used as parameter
|
<ASSISTANT_TASK:>
Python Code:
ppl_path = '../../pyfas/test/test_files/'
fname = 'FC1_rev01.ppl'
ppl = fa.Ppl(ppl_path+fname)
ppl.filter_data('PT')
pd.DataFrame(ppl.filter_data('PT'), index=("Profiles",)).T
pd.DataFrame(ppl.filter_data("TM"), index=("Profiles",)).T
pd.DataFrame(ppl.filter_data("PT"), index=("Profiles",)).T
ppl.extract(13)
ppl.extract(12)
ppl.data.keys()
ppl.label[13]
%matplotlib inline
geometry = ppl.data[12][0]
pt_riser = ppl.data[12][1]
tm_riser = ppl.data[13][1]
def ppl_plot(geo, v0, v1, ts):
    """Plot temperature (v0) and pressure (v1) profiles along the pipeline
    geometry *geo* at timestep index *ts*, using twin y-axes."""
    fig, ax0 = plt.subplots(figsize=(12, 7));
    ax0.grid(True)
    p0, = ax0.plot(geo, v0[ts])
    ax0.set_ylabel("[C]", fontsize=16)
    ax0.set_xlabel("[m]", fontsize=16)
    # Second y-axis for pressure, converted from Pa to bara (/1e5).
    ax1 = ax0.twinx()
    p1, = ax1.plot(geo, v1[ts]/1e5, 'r')
    ax1.grid(False)
    ax1.set_ylabel("[bara]", fontsize=16)
    # NOTE(review): the next two lines are identical; one was presumably
    # meant to configure ax0 -- confirm.
    ax1.tick_params(axis="both", labelsize=16)
    ax1.tick_params(axis="both", labelsize=16)
    plt.legend((p0, p1), ("Temperature profile", "Pressure profile"), loc=3, fontsize=16)
    plt.title("P and T for case FC1", size=20);
ppl_plot(geometry, tm_riser, pt_riser, -1)
import ipywidgets.widgets as widgets
from ipywidgets import interact
# Last valid timestep index, used as the slider's upper bound.
timesteps=len(tm_riser)-1
@interact
def ppl_plot(ts=widgets.IntSlider(min=0, max=timesteps)):
    """Interactive variant: redraw the P/T profiles for the timestep chosen
    on the slider."""
    fig, ax0 = plt.subplots(figsize=(12, 7));
    ax0.grid(True)
    p0, = ax0.plot(geometry, tm_riser[ts])
    ax0.set_ylabel("[C]", fontsize=16)
    ax0.set_xlabel("[m]", fontsize=16)
    # Fixed axis limits keep the picture stable while scrubbing timesteps.
    ax0.set_ylim(10, 12)
    ax1 = ax0.twinx()
    ax1.set_ylim(90, 130)
    p1, = ax1.plot(geometry, pt_riser[ts]/1e5, 'r')
    ax1.grid(False)
    ax1.set_ylabel("[bara]", fontsize=16)
    ax1.tick_params(axis="both", labelsize=16)
    ax1.tick_params(axis="both", labelsize=16)
    plt.legend((p0, p1), ("Temperature profile", "Pressure profile"), loc=3, fontsize=16)
    plt.title("P and T for case FC1 @ timestep {}".format(ts), size=20);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basics of Molecular Dynamics
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import HTML
css_file = 'https://raw.githubusercontent.com/ngcm/training-public/master/ipython_notebook_styles/ngcmstyle.css'
HTML(url=css_file)
%matplotlib inline
import numpy
from matplotlib import pyplot
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16
rcParams['figure.figsize'] = (12,6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Defining Helper Functions
Step2: Train and evaluate a Neural Network (NN) model
Step3: Training the neural network
Step4: First, train your model by using 128 hidden layers.
Step5: You would normally expect that adding layers to a model will improve the ability of the model to fit the training data, and thus lower the loss. Notice that it is not always the case though.
Step6: Train and evaluate a Deep Neural Network model
|
<ASSISTANT_TASK:>
Python Code:
# Import and print the installed version of TensorFlow
import tensorflow as tf
print(tf.version.VERSION)
# Helper functions
def training_plot(metrics, history):
    """Plot training vs. validation curves, one subplot per metric.

    metrics: list of metric names present in ``history.history``; each
        must also have a matching ``'val_<metric>'`` entry.
    history: a Keras History object as returned by ``model.fit``.
    """
    # squeeze=False guarantees a 2-D array of Axes even for a single
    # metric; the original `ax[idx]` raised a TypeError when
    # len(metrics) == 1 because subplots() then returns a bare Axes.
    f, ax = plt.subplots(1, len(metrics), squeeze=False, figsize=(5*len(metrics), 5))
    for idx, metric in enumerate(metrics):
        axis = ax[0, idx]
        # Training curve dashed, validation curve solid.
        axis.plot(history.history[metric], ls='dashed')
        axis.set_xlabel("Epochs")
        axis.set_ylabel(metric)
        axis.plot(history.history['val_' + metric]);
        axis.legend([metric, 'val_' + metric])
# Call model.predict() on a few images in the evaluation dataset
def plot_predictions(filename):
    """Display the first 15 images listed in `filename` (a CSV of
    "filepath,label" rows) with their true label and the model's
    predicted label and probability in each subplot title.

    NOTE(review): reads the notebook-level globals `model`, `decode_csv`,
    `CLASS_NAMES` and the IMG_* constants defined in earlier cells.
    """
    f, ax = plt.subplots(3, 5, figsize=(25,15))
    dataset = (tf.data.TextLineDataset(filename).
               map(decode_csv))
    for idx, (img, label) in enumerate(dataset.take(15)):
        ax[idx//5, idx%5].imshow((img.numpy()));
        # predict() expects a batch, so add a leading batch dimension of 1.
        batch_image = tf.reshape(img, [1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
        batch_pred = model.predict(batch_image)
        pred = batch_pred[0]
        label = CLASS_NAMES[label.numpy()]
        # Highest-probability class is the prediction.
        pred_label_index = tf.math.argmax(pred).numpy()
        pred_label = CLASS_NAMES[pred_label_index]
        prob = pred[pred_label_index]
        ax[idx//5, idx%5].set_title('{}: {} ({:.4f})'.format(label, pred_label, prob))
def show_trained_weights(model):
    """Visualize the first dense layer's weights as one image per class.

    For each class, the weight column feeding that class's output unit is
    min-max scaled to [0, 1] and reshaped back to image dimensions.

    NOTE(review): assumes layer 1 is a Dense layer directly after a
    Flatten of an (IMG_HEIGHT, IMG_WIDTH, 3) image — confirm if the
    architecture changes; divides by (max - min), so constant weights
    would divide by zero.
    """
    # CLASS_NAMES is ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
    LAYER = 1 # Layer 0 flattens the image, layer=1 is the first dense layer
    WEIGHT_TYPE = 0 # 0 for weight, 1 for bias
    f, ax = plt.subplots(1, 5, figsize=(15,15))
    for flower in range(len(CLASS_NAMES)):
        # Column `flower` of the kernel: the weights into that class's unit.
        weights = model.layers[LAYER].get_weights()[WEIGHT_TYPE][:, flower]
        min_wt = tf.math.reduce_min(weights).numpy()
        max_wt = tf.math.reduce_max(weights).numpy()
        flower_name = CLASS_NAMES[flower]
        print("Scaling weights for {} in {} to {}".format(
            flower_name, min_wt, max_wt))
        # Min-max normalize so the weights render as a valid image.
        weights = (weights - min_wt)/(max_wt - min_wt)
        ax[flower].imshow(weights.reshape(IMG_HEIGHT, IMG_WIDTH, 3));
        ax[flower].set_title(flower_name);
# The import statement combines two operations; it searches for the named module, then it binds the results of that search
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
def read_and_decode(filename, reshape_dims):
    """Load the JPEG at `filename` and return a float32 tensor in [0, 1],
    resized to `reshape_dims` ([height, width])."""
    # TODO 1: Read the file
    img = tf.io.read_file(filename)
    # Convert the compressed string to a 3D uint8 tensor.
    img = tf.image.decode_jpeg(img, channels=IMG_CHANNELS)
    # Use `convert_image_dtype` to convert to floats in the [0,1] range.
    img = tf.image.convert_image_dtype(img, tf.float32)
    # Resize the image to the desired size.
    return tf.image.resize(img, reshape_dims)
CLASS_NAMES = [item.numpy().decode("utf-8") for item in
tf.strings.regex_replace(
tf.io.gfile.glob("gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/*"),
"gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/", "")]
CLASS_NAMES = [item for item in CLASS_NAMES if item.find(".") == -1]
print("These are the available classes:", CLASS_NAMES)
# the label is the index into CLASS_NAMES array
def decode_csv(csv_row):
    """Parse one "filepath,label" CSV row into an (image, label) pair.

    The image is loaded and resized via read_and_decode; the label is
    the integer index of the flower name within CLASS_NAMES.
    """
    record_defaults = ["path", "flower"]
    filename, label_string = tf.io.decode_csv(csv_row, record_defaults)
    img = read_and_decode(filename, [IMG_HEIGHT, IMG_WIDTH])
    # argmax over the equality mask yields the index of the matching class name.
    label = tf.argmax(tf.math.equal(CLASS_NAMES, label_string))
    return img, label
import tensorflow as tf
import numpy as np
import matplotlib.pylab as plt
fig, ax = plt.subplots(1, 3, figsize=(10,5))
x = np.arange(-10.0, 10.0, 0.1)
y = tf.keras.activations.sigmoid(x)
ax[0].plot(x, y);
ax[0].set_title("sigmoid")
y = tf.keras.activations.relu(x)
ax[1].plot(x, y);
ax[1].set_title("relu")
y = tf.keras.activations.elu(x)
ax[2].plot(x, y);
ax[2].set_title("elu");
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax')
])
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, show_layer_names=False)
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax')
])
model.summary()
BATCH_SIZE = 32
train_dataset = (tf.data.TextLineDataset(
"gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/train_set.csv").
map(decode_csv)).batch(BATCH_SIZE)
eval_dataset = (tf.data.TextLineDataset(
"gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/eval_set.csv").
map(decode_csv)).batch(BATCH_SIZE)
# NN with one hidden layer
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)),
tf.keras.layers.Dense(128, activation=tf.keras.activations.relu),
tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax')
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
# TODO 2: Train a Neural Network model
history = model.fit(train_dataset, validation_data=eval_dataset, epochs=10)
training_plot(['loss', 'accuracy'], history)
# parameterize to the values in the previous cell
def train_and_evaluate(batch_size = 32,
                       lrate = 0.001,  # default in Adam constructor
                       l1 = 0,
                       l2 = 0,
                       num_hidden = 128):
    """Build, train (10 epochs) and plot a one-hidden-layer NN classifier.

    Hyperparameters: batch size, Adam learning rate, L1/L2 regularization
    strengths and the width of the single hidden layer. Reads the
    notebook-level `decode_csv`, `CLASS_NAMES`, IMG_* and `training_plot`
    names. Returns the trained Keras model.
    """
    regularizer = tf.keras.regularizers.l1_l2(l1, l2)
    train_dataset = (tf.data.TextLineDataset(
        "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/train_set.csv").
        map(decode_csv)).batch(batch_size)
    eval_dataset = (tf.data.TextLineDataset(
        "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/eval_set.csv").
        map(decode_csv)).batch(32) # this doesn't matter
    # NN with one hidden layers
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)),
        tf.keras.layers.Dense(num_hidden,
                              kernel_regularizer=regularizer,
                              activation=tf.keras.activations.relu),
        tf.keras.layers.Dense(len(CLASS_NAMES),
                              kernel_regularizer=regularizer,
                              activation='softmax')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lrate),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy'])
    history = model.fit(train_dataset, validation_data=eval_dataset, epochs=10)
    training_plot(['loss', 'accuracy'], history)
    return model
model = train_and_evaluate(batch_size=32, lrate=0.0001, l1=0, l2=0, num_hidden=128)
model = train_and_evaluate(batch_size=32, lrate=0.0001, l1=0, l2=0, num_hidden=256)
# parameterize to the values in the previous cell
def train_and_evaluate(batch_size = 32,
                       lrate = 0.0001,
                       l1 = 0,
                       l2 = 0.001,
                       num_hidden = (64, 16)):
    """Build, train (10 epochs) and plot a DNN flower classifier.

    num_hidden lists the width of each hidden layer in order; the
    default (64, 16) builds a two-hidden-layer network. The original
    used a mutable list as the default argument — a tuple avoids the
    shared-mutable-default anti-pattern while behaving identically,
    and callers may still pass a list. Returns the trained Keras model.
    """
    regularizer = tf.keras.regularizers.l1_l2(l1, l2)
    train_dataset = (tf.data.TextLineDataset(
        "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/train_set.csv").
        map(decode_csv)).batch(batch_size)
    eval_dataset = (tf.data.TextLineDataset(
        "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/eval_set.csv").
        map(decode_csv)).batch(32) # this doesn't matter
    # NN with multiple hidden layers
    layers = [tf.keras.layers.Flatten(
        input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),
        name='input_pixels')]
    # One relu Dense layer per entry of num_hidden, named for readability
    # in model.summary().
    layers = layers + [
        tf.keras.layers.Dense(nodes,
                              kernel_regularizer=regularizer,
                              activation=tf.keras.activations.relu,
                              name='hidden_dense_{}'.format(hno))
        for hno, nodes in enumerate(num_hidden)
    ]
    layers = layers + [
        tf.keras.layers.Dense(len(CLASS_NAMES),
                              kernel_regularizer=regularizer,
                              activation='softmax',
                              name='flower_prob')
    ]
    model = tf.keras.Sequential(layers, name='flower_classification')
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lrate),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(
                      from_logits=False),
                  metrics=['accuracy'])
    print(model.summary())
    history = model.fit(train_dataset, validation_data=eval_dataset, epochs=10)
    training_plot(['loss', 'accuracy'], history)
    return model
# TODO 3: Train and evaluate a DNN model
model = train_and_evaluate(lrate=0.0001, l2=0.001, num_hidden = [64, 16])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Need a udf that returns a mapType of string
Step2: Now let's write this out and go back and join to the main DF for some summaries
Step3: How much more information might we be able to find in records that records that are not JSON parsable?
|
<ASSISTANT_TASK:>
Python Code:
def json_to_map(s):
    """Convert a string containing JSON into a dictionary.

    Skip flattening for now: nested structures are returned as-is.
    Malformed or non-string input yields an empty dict, so bad records
    simply contribute no properties instead of failing the Spark job.

    (The original's docstring had lost its quotes — a SyntaxError —
    and used a bare ``except:`` that swallowed every exception.)
    """
    try:
        return json.loads(s)
    except (TypeError, ValueError):
        # ValueError covers json.JSONDecodeError; TypeError covers
        # None / non-string input. Anything else still propagates.
        return {}
json_to_map_udf = udf(json_to_map, MapType(StringType(), StringType()))
print(json_to_map('{ "solr_long_lat": "-5.87403,30.49728", "related_record_types": "PreservedSpecimen|PreservedSpecimen", "related_record_links": "YPM-IP-530950|YPM-IP-530951" }'))
idb_map = (idb_dyn
.withColumn("props_map", json_to_map_udf(col("props_str")))
)
idb_map.select(col("props_map")).show(10, truncate=False)
idb_triples = (idb_map
.select(col("uuid"),
col("recordset"),
col("institutioncode"),
explode(col("props_map")).alias("key", "value"))
)
idb_triples.cache()
idb_triples.count()
idb_triples.show(20, truncate=False)
(idb_triples
.groupBy(col("key"))
.count()
.sort(col("count"), ascending=False)
.limit(1000)
).toPandas()
(idb_triples
.groupBy(col("institutioncode"))
.count()
.sort(col("count"), ascending=False)
.limit(1000)
).toPandas()
(idb_triples
.filter(col("key") == "NSF_TCN")
.count()
)
(idb_triples
.filter(col("key") == "NSF_TCN")
.groupBy(col("institutioncode"), col("value"))
.count()
.sort(col("count"), ascending=False)
.limit(1000)
).toPandas()
#(idb_triples
# .write
# .parquet("/tmp/idigbio-20171014T023306-json-triples.parquet")
#)
(idb_triples
.select(length(col("key")).alias("len_key"))
.avg(col("len_key"))
.show()
)
#joined = idb_dyn.join(idb_triples, idb_dyn["uuid"] == idb_triples["uuid"], "inner")
#joined.show(3, truncate=False)
#joined.count()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Summary
Step2: NOTES
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import pylab, mlab, gridspec
from IPython.core.pylabtools import figsize, getfigs
from IPython.display import display, HTML
from pylab import *
# GLOBALS
# working directory
CWD = os.getcwd()
# Data File(s)
VERSION = "20170321"
DATAFILE = "afc_" + VERSION + ".csv"
datapath = os.path.join(CWD,DATAFILE)
# read the data file into a data frame
df = pd.read_csv(datapath)
# a custom definition of an info method callable on a series
def sinfo(x):
    """Summarise a pandas Series as one string.

    Reports the element dtype, the number of missing values, the total
    length and the number of unique values, e.g.
    ``"float64 null: 0 len: 3 unq: 2"``.
    """
    uniques = x.unique()
    total = len(x)
    # count() excludes NaN/None, so the difference is the null count.
    missing = total - x.count()
    dtype_name = np.array(uniques.tolist()).dtype
    return "{} null: {} len: {} unq: {}".format(
        dtype_name, missing, total, len(uniques))
# function to output summary information (dimensions,data types among others) for
# the specified dataframe
def summarize(df):
    """Print summary information (dimensions, per-column stats) for *df*.

    Relies on the module-level `sinfo` helper for the per-column summary
    string. (The original used Python 2 print statements, which are a
    SyntaxError on Python 3 — the rest of this file is Python 3.)
    """
    # Output the number of rows and cols of the data frame.
    print("Dimensions: {}".format(df.shape))
    # For each column: the sinfo() summary plus the column's unique values.
    print("{}".format(df.apply(lambda x: (sinfo(x), x.unique()))))
summarize(df)
df.head()
# group by Region
df_g_region = df.groupby('Region')
# compute counts for key indicators for each group
indices = []
cols = ['NumRCFELicensed','NumRCFEBedsLicensed','NumRCFEInALWP','2012Pop65Over','2030Pop65Over',
'2012PopMinority','2012MedianHHIncome','2012MedianHHIncome65Over','2012PopLowIncome65Over']
# create a 2-D array to hold information for each group
dim = (len(df_g_region.groups),len(cols))
data = np.zeros(dim, dtype=np.float)
pos = 0
for region, group in df_g_region:
indices.append(region)
sra_data = group[group['Zipcode'] == 0]
#display(sra_data)
sra_data_sum = sra_data[cols].sum(axis=0).to_frame().T
#display(sra_data_sum)
data[pos] = sra_data_sum.values
pos = pos + 1
df_region = pd.DataFrame(data=data,columns=cols,index=indices)
df_region = df_region.apply(lambda x: x.astype(int))
display(df_region)
# convert population and income data to kilo units
df_region[cols[3:]] = df_region[cols[3:]].apply(lambda x: x/1000)
df_region['NumRCFEBedsLicensed'] = df_region['NumRCFEBedsLicensed'].apply(lambda x: int(x/100))
# convert to long format for visualization
df_region = df_region.T
region_list = df_region.columns.tolist()
# this configuration below prevents pylab from importing anything into the global namespace
# needed to prevent user warning about namespace clashes
#%config InteractiveShellApp.pylab_import_all = False
#%pylab inline
%matplotlib inline
matplotlib.rcParams['font.size'] = 10
matplotlib.rcParams['font.weight'] = 'bold'
# plot the above in a bar graph
width = 0.3
labels = ['NumRCFELicensed','NumRCFEBedsLicensed','NumRCFE (ALWP)','65+ (2012)','65+ (2030)','Minorities (2012)',
'Med HH Income','Med HH Income (65+)','Low Income (65+)']
pos = np.arange(len(labels))
x_ticks_pos = pos + (0.15 * width)
num_regions = len(region_list)
fig, axes = plt.subplots(num_regions, 1, figsize=(15, 25))
for i,region in enumerate(region_list):
plot_vals = df_region[region]
bars = axes[i].bar(pos,plot_vals,width,color='#00688B',alpha=0.5)
for j, bar in enumerate(bars):
ht = bar.get_height()
bar_pos = bar.get_x() + bar.get_width()/2.
ht = ((ht+1) * -50) if plot_vals[j] < 0 else ht+10
txt = '{:.0f}K'.format(plot_vals[j]) if j>2 else '{:.0f}'.format(plot_vals[j])
txt = '{:.0f} (x 100)'.format(plot_vals[j]) if j==2 else txt
axes[i].text(bar_pos, ht, txt, ha='center', va='bottom')
axes[i].spines['top'].set_visible(False)
axes[i].spines['right'].set_visible(False)
axes[i].set_xlim(min(pos)-width, max(pos)+width)
axes[i].set_ylim([0, max(plot_vals)] )
title = region
axes[i].set_title(title,fontsize=14, fontweight='bold',color='#FF8C00')
axes[i].title.set_position([.5,1.05])
#axes[i].grid(True)
if i == (num_regions-1):
axes[i].set_xticks(x_ticks_pos)
axes[i].set_xticklabels(labels,rotation=45,fontsize=11, fontweight='bold')
else:
axes[i].tick_params(axis='x',labelbottom='off')
axes[i].margins(0.3, 0)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This notebook is to test the optimization of the test locations V, W in NFSIC.
Step2: Grid search for Gaussian widths. Random test locations
Step3: Optimize the test locations V, W and Gaussian widths
Step5: Test power + optimization
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
#%config InlineBackend.figure_format = 'pdf'
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import fsic.util as util
import fsic.data as data
import fsic.kernel as kernel
import fsic.indtest as it
import fsic.glo as glo
import scipy.stats as stats
# font options
font = {
#'family' : 'normal',
#'weight' : 'bold',
'size' : 14
}
plt.rc('font', **font)
plt.rc('lines', linewidth=2)
#matplotlib.rc('text', usetex=True)
#matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
def get_quad_psfunc():
    """Return a PairedSource to generate y = 0.3*(x-1)^2 + Gaussian noise.

    x is drawn uniformly from [mean - width/2, mean + width/2].
    (The original docstring had lost its quotes — a SyntaxError.)
    """
    mean = 2
    width = 6
    px = lambda n: stats.uniform.rvs(loc=mean-width/2, scale=width, size=n)[:, np.newaxis]
    # Size the noise from the actual input rather than closing over the
    # notebook-global `n` as the original did: that produced a shape
    # mismatch whenever f was called with a different sample size.
    f = lambda x: 0.3*(x-1)**2 + 0.3*np.random.randn(x.shape[0], 1)
    #f = lambda x: x
    return data.PSFunc(f, px)
# paired source
alpha = 0.01
n = 1000
dx = 50
dy = 5
seed = 339
ps = data.PSIndSameGauss(dx, dy)
#ps = data.PS2DUnifRotate(angle=np.pi/4)
#ps = data.PSUnifRotateNoise(angle=np.pi/3, noise_dim=2)
#ps = get_quad_psfunc()
#ps = data.PSIndUnif(xlb=[0, 3], xub=[1, 10], ylb=[-5, 5], yub=[8, 10])
#ps = data.PS2DSinFreq(freq=2)
pdata = ps.sample(n, seed=seed)
tr, te = pdata.split_tr_te(tr_proportion=0.5, seed=seed+5)
# plot the data. Assume 2d. Plot the first dimensions of X and Y.
xtr, ytr = tr.xy()
plt.plot(xtr[:, 0], ytr[:, 0], 'ob')
plt.xlabel('$X$')
plt.ylabel('$Y$')
J = 2
V, W = it.GaussNFSIC.init_locs_2randn(tr, J, seed=seed+1)
X, Y = tr.xy()
n_gwidth_cand = 30
gwidthx_factors = 2.0**np.linspace(-4, 4, n_gwidth_cand)
gwidthy_factors = gwidthx_factors
#gwidthy_factors = 2.0**np.linspace(-3, 4, 40)
medx = util.meddistance(X, 1000)
medy = util.meddistance(Y, 1000)
list_gwidthx = np.hstack( ( (medx**2)*gwidthx_factors ) )
list_gwidthy = np.hstack( ( (medy**2)*gwidthy_factors ) )
bestij, lambs = it.GaussNFSIC.grid_search_gwidth(tr, V, W, list_gwidthx, list_gwidthy)
# These are width^2
best_widthx = list_gwidthx[bestij[0]]
best_widthy = list_gwidthy[bestij[1]]
# plot
Candy, Candx = np.meshgrid(list_gwidthy, list_gwidthx)
plt.figure(figsize=(8,5))
plt.contourf(Candx, Candy, lambs)
plt.plot(best_widthx, best_widthy, '*k', markersize=25, label='Best widths')
plt.xlabel('Gaussian width for $X$')
plt.ylabel('Gaussian width for $Y$')
plt.title('Plot $\hat{\lambda}_n$. Best widths: (%.3g, %.3g)'
%(best_widthx**0.5, best_widthy**0.5))
plt.legend(numpoints=1)
plt.colorbar()
# perform test
nfsic_grid = it.GaussNFSIC(best_widthx, best_widthy, V, W, alpha)
test_result = nfsic_grid.perform_test(te)
test_result
op = {'n_test_locs':J, 'max_iter':400,
'V_step':1, 'W_step':1, 'gwidthx_step':1, 'gwidthy_step':1,
'batch_proportion':0.7, 'tol_fun':1e-4, 'step_pow':0.5, 'seed':seed+7}
op_V, op_W, op_gwx, op_gwy, info = it.GaussNFSIC.optimize_locs_widths(tr, alpha, **op )
# perform test
nfsic_full = it.GaussNFSIC(op_gwx, op_gwy, op_V, op_W, alpha)
nfsic_full.perform_test(te)
# Plot evolution of the test locations, Gaussian width
# trajectories of the Gaussian widths
gwidthxs = info['gwidthxs']
gwidthys = info['gwidthys']
fig, axs = plt.subplots(3, 2, figsize=(12, 10))
axs[1, 0].plot(gwidthxs, label='widths(X)')
#axs[0, 0].plot(gwidthys, label='widths(Y)')
axs[1, 0].set_xlabel('iteration')
axs[1, 0].set_ylabel('Gaussian width for X')
axs[1, 0].legend()
#axs[0, 0].set_title('Gaussian width evolution')
axs[2, 0].plot(gwidthys, label='widths(Y)')
axs[2, 0].set_xlabel('iteration')
axs[2, 0].set_ylabel('Gaussian width for Y')
axs[2, 0].legend()
# evolution of objective values
objs = info['obj_values']
axs[0, 1].plot(objs)
axs[0, 1].set_title('Objective $\hat{\lambda}_n$')
# trajectories of the test locations
# iters x J. X Coordinates of all test locations
Vs = info['Vs']
vs = Vs[:, 0, 0]
axs[1, 1].plot(vs)
axs[1, 1].set_xlabel('iteration')
axs[1, 1].set_ylabel('dim 0 of V')
Ws = info['Ws']
ws = Ws[:, 0, 0]
axs[2, 1].plot(ws)
axs[2, 1].set_xlabel('iteration')
axs[2, 1].set_ylabel('dim 0 of W')
print('medx2: %g'%medx**2)
print('medy2: %g'%medy**2)
print('optimized gwx: %g'%info['gwidthxs'][-1])
print('optimized gwy: %g'%info['gwidthys'][-1])
print('optimized + bounding gwx: %g'%op_gwx)
print('optimized + bounding gwy: %g'%op_gwy)
V = nfsic_full.V
W = nfsic_full.W
# plot
plt.figure(figsize=(10, 5))
plt.imshow(V, interpolation='none')
plt.title('V. J x d = %d x %d'%(V.shape[0], V.shape[1]))
plt.colorbar(orientation='horizontal')
loc_ind = 0
# Vs: #iters x J x d
plt.figure(figsize=(10, 5))
plt.plot(Vs[:, loc_ind, :]);
plt.xlabel('iteration')
plt.title('Consider location %d. dx = %d.'%(loc_ind, Vs.shape[2]) )
dim = 0
plt.figure(figsize=(10, 5))
plt.plot(Vs[:, :, dim]);
plt.xlabel('iteration')
plt.title('Consider dim %d. All %d locations of X'%(dim, J))
reps = 50
n = 1000
J = 10
alpha = 0.05
# None = use aymptotics
n_permute = None
#n_permute = 200
ps = data.PSIndSameGauss(dx=20, dy=20)
def run_trial(r):
    """Run one repetition of the NFSIC optimization experiment.

    r: repetition number (also used to derive the random seeds)
    Return the resulting GaussNFSIC object, optimization info

    (This docstring had lost its quotes in the original, which made the
    function body a SyntaxError.) Reads the notebook-level globals
    ps, n, J, alpha and n_permute.
    """
    print('starting rep: %d'%(r+1))
    pdata = ps.sample(n, seed=r)
    tr, te = pdata.split_tr_te(tr_proportion=0.5, seed=r+87)
    nfsic_opt_options = {'n_test_locs':J, 'max_iter':200,
        'V_step':1, 'W_step':1, 'gwidthx_step':1, 'gwidthy_step':1,
        'batch_proportion':0.7, 'tol_fun':1e-3, 'step_pow':0.5, 'seed':r+2,
        'reg': 1e-6}
    #V, W = it.GaussNFSIC.init_locs_joint_subset(pdata2, J, seed=r+1)
    #V, W = it.GaussNFSIC.init_locs_2randn(pdata, J, seed=r+3)
    op_V, op_W, op_gwx, op_gwy, info = it.GaussNFSIC.optimize_locs_widths(tr,
        alpha, **nfsic_opt_options )
    nfsic_opt = it.GaussNFSIC(op_gwx, op_gwy, op_V, op_W, alpha=alpha,
        reg='auto', n_permute=n_permute, seed=r+3)
    return nfsic_opt, info
#from multiprocessing.dummy import Pool as ThreadPool
#threads = 4
#pool = ThreadPool(threads)
#rep_nfsics = pool.map(run_trial, range(reps))
opt_infos = []
rep_nfsics = []
for r in range(reps):
nf, info = run_trial(r)
opt_infos.append(info)
rep_nfsics.append(nf)
test_results = np.zeros(reps, dtype=object)
for r in range(reps):
nfsic = rep_nfsics[r]
pdata = ps.sample(4000, seed=r+1)
tr, te = pdata.split_tr_te(tr_proportion=0.5, seed=r+87)
nfsic_result = nfsic.perform_test(te)
test_results[r] = nfsic_result
# sequence of power
#rejs = [re['h0_rejected'] for re in test_results[:(r+1)]]
#print 'power at rep %3d: %5.4g, #rejs: %3d'%(r+1, np.mean(rejs), np.sum(rejs))
rejs = np.array([r['h0_rejected'] for r in test_results])
rep_stats = np.array([r['test_stat'] for r in test_results])
thresh = stats.chi2.isf(alpha, df=J)
power = np.mean(rejs)
print('power: %g'%power)
np.where(np.isnan(rep_stats))
# histogram
dom = np.linspace(stats.chi2.isf(0.99, df=J), stats.chi2.isf(0.01, df=J), 600)
chi2_den = stats.chi2.pdf(dom, df=J)
plt.figure(figsize=(10, 5))
plt.hist(rep_stats[np.isfinite(rep_stats)], bins=20,
alpha=0.5, label='Repeated trials', density=True)
plt.plot(dom, chi2_den, '-', label=r'$\chi^2(%d)$'%J)
plt.legend()
# check optimized locations
def plot_opt_VW_trial(r):
    """Scatter-plot the first two columns of the optimized test
    locations [V | W] for trial `r`, dropping non-finite rows.

    Reads the notebook-level `rep_nfsics` list of fitted tests.
    NOTE(review): with multi-dimensional V, columns 0 and 1 of the
    hstacked [V | W] are both V dimensions, despite the 'W' axis
    label — the labels are only accurate when dx == dy == 1; verify
    intent.
    """
    nf = rep_nfsics[r]
    V = nf.V
    W = nf.W
    VW = np.hstack((V, W))
    # Keep only rows where every coordinate is finite.
    VW = VW[np.isfinite(np.sum(VW,1))]
    #print VW
    # plot
    plt.plot(VW[:, 0], VW[:, 1], 'o')
    plt.xlabel('V')
    plt.ylabel('W')
    plt.xlim([-4, 4])
    plt.ylim([-4, 4])
    plt.title('trial: %d, #locs: %d'%(r, VW.shape[0]))
from ipywidgets import interact, fixed
interact(plot_opt_VW_trial, r=(0, reps-1, 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialize the OpenTire factory and create a Pacejka 2002 tire model
Step2: Initialize the tire state
Step3: Solving for the tire forces will update the tire state
Step4: Iterate over vertical loads and slip angles to generate a lateral force vs slip angle plot at three different vertical loads.
|
<ASSISTANT_TASK:>
Python Code:
from opentire import OpenTire
from opentire.Core import TireState
from opentire.Core import TIRFile
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
openTire = OpenTire()
myTireModel = openTire.createmodel('PAC2002')
state = TireState()
state['FZ'] = 1500
state['IA'] = 0.0
state['SR'] = 0.0
state['SA'] = 0.0
state['FY'] = 0.0
state['V'] = 10.0
state['P'] = 260000
myTireModel.solve(state)
pprint(state)
# Setup the simulation range
vertical_loads = [500, 1000, 2000]
slip_angles = np.arange(-12, 12, 0.1) * 3.14 / 180
# Initialize the lateral force result
lateral_force = []
for fz in vertical_loads:
lateral_force = []
state['FZ'] = fz
for sa in slip_angles:
# Solving
state['SA'] = sa
myTireModel.solve(state)
lateral_force.append(state['FY'])
# Plot the series
plt.plot(slip_angles * 180 / 3.14, lateral_force, label=fz)
# Plotting
plt.grid()
plt.xlabel('Slip Angle [deg]')
plt.ylabel('Lateral Force [N]')
plt.title('Lateral Force vs. Slip Angle')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Flower power
Step2: ConvNet Codes
Step3: Below I'm running images through the VGG network in batches.
Step4: Building the Classifier
Step5: Data prep
Step6: Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same the distribution of classes as it is for the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn.
Step7: If you did it right, you should see these sizes for the training sets
Step9: Batches!
Step10: Training
Step11: Testing
Step12: Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
|
<ASSISTANT_TASK:>
Python Code:
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
raise Exception("VGG directory doesn't exist!")
class DLProgress(tqdm):
    """tqdm progress bar driven by urlretrieve's reporthook callback."""
    # Block index seen on the previous callback; lets hook() report only
    # the delta since the last call.
    last_block = 0
    def hook(self, block_num=1, block_size=1, total_size=None):
        """Reporthook for urlretrieve(url, filename, reporthook=...).

        block_num: number of blocks transferred so far
        block_size: size of each block in bytes
        total_size: total file size in bytes
        """
        self.total = total_size
        # Advance the bar by the bytes received since the last callback.
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
if not isfile(vgg_dir + "vgg16.npy"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
urlretrieve(
'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
vgg_dir + 'vgg16.npy',
pbar.hook)
else:
print("Parameter file already exists!")
import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
    """tqdm progress bar driven by urlretrieve's reporthook callback."""
    # Block index seen on the previous callback; lets hook() report only
    # the delta since the last call.
    last_block = 0
    def hook(self, block_num=1, block_size=1, total_size=None):
        """Reporthook for urlretrieve(url, filename, reporthook=...).

        block_num: number of blocks transferred so far
        block_size: size of each block in bytes
        total_size: total file size in bytes
        """
        self.total = total_size
        # Advance the bar by the bytes received since the last callback.
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
with tarfile.open('flower_photos.tar.gz') as tar:
tar.extractall()
tar.close()
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
# Set the batch size higher if you can fit in in your GPU memory
batch_size = 32
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
my_vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
my_vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
# Get the values from the relu6 layer of the VGG network
feed_dict = {input_ : images}
# KEY!!!!
codes_batch = sess.run(my_vgg.relu6, feed_dict = feed_dict)
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels)
print(labels)
# read codes and labels from file
import csv
import numpy as np
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes') as f:
codes = np.fromfile(f, dtype=np.float32)
print(codes.size)
codes = codes.reshape((len(labels), -1))
print(codes.shape, labels.shape)
from sklearn.preprocessing import LabelBinarizer
labelBinarizer = LabelBinarizer()
labelBinarizer.fit(labels)
labels_vecs = labelBinarizer.transform(labels)
print(labels_vecs.shape)
from sklearn.model_selection import StratifiedShuffleSplit
sss = StratifiedShuffleSplit(n_splits=1, test_size=.2)
i, j = next( sss.split(codes, labels) )
h = len(j) // 2
j, k = j[:h], j[h: ] # Validation 50%
# end j at half of j and start k at half of j
train_x, train_y = codes[i], labels_vecs[i]
val_x, val_y = codes[j], labels_vecs[j]
test_x, test_y = codes[k], labels_vecs[k]
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])
l_1 = tf.contrib.layers.fully_connected(inputs_, 256)
logits = tf.contrib.layers.fully_connected(l_1, labels_vecs.shape[1], activation_fn=None)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits)
cost = tf.reduce_mean( cross_entropy )
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Operations for validation/test accuracy
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def get_batches(x, y, n_batches=10):
    """Return a generator that yields batches from arrays x and y.

    The first n_batches - 1 batches have len(x) // n_batches elements;
    the final batch also absorbs the remainder, so every element is
    yielded exactly once. x and y must have the same length and
    n_batches must not exceed len(x).

    (This docstring had lost its quotes in the original, which made it
    a SyntaxError.)
    """
    batch_size = len(x)//n_batches
    for ii in range(0, n_batches*batch_size, batch_size):
        # If we're not on the last batch, grab data with size batch_size
        if ii != (n_batches-1)*batch_size:
            X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size]
        # On the last batch, grab the rest of the data (including the
        # remainder when len(x) is not divisible by n_batches)
        else:
            X, Y = x[ii:], y[ii:]
        yield X, Y
saver = tf.train.Saver()
e = 20
iteration = 0
with tf.Session() as sess:
# 1. Start session
sess.run(tf.global_variables_initializer() )
# 2. Do each epoch
for i in range(e):
#3. Do each batch
for x, y in get_batches(train_x, train_y):
#4. Input data
feed = {inputs_: x, labels_: y}
#5. Do loss
loss, _ = sess.run([cost, optimizer], feed_dict=feed)
#6. Increment counter
iteration += 1
#7. Print results
print("Epoch: {} / {}".format( i, e),
"Iteration: {}".format( iteration ),
"Train loss: {:.5f}".format( loss ))
#8. Do Validation
if iteration % 5 == 0:
feed = {inputs_: val_x, labels_: val_y}
val_acc = sess.run(accuracy, feed_dict=feed)
print("Epoch: {} / {}".format( i, e),
"Iteration: {} ".format( iteration),
"Validation Acc: {:.4f}".format(val_acc) )
saver.save(sess, "checkpoints/flowers.ckpt")
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread
# Run this cell if you don't have a vgg graph built
if 'my_vgg' in globals():
print('"vgg" object already exists. Will not create again.')
else:
#create vgg
with tf.Session() as sess:
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16.Vgg16()
vgg.build(input_)
test_img_path = 'flower_photos/daisy/5547758_eea9edfd54_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
## KEY
code = sess.run(my_vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
print(max(prediction))
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), labelBinarizer.classes_)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's first define the list of parameters to use in each dataset.
Step2: Now, let's define the function to generate each dataset.
|
<ASSISTANT_TASK:>
Python Code:
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import predictor.feature_extraction as fe
import utils.preprocessing as pp
# Input values
GOOD_DATA_RATIO = 0.99 # The ratio of non-missing values for a symbol to be considered good
SAMPLES_GOOD_DATA_RATIO = 0.9 # The ratio of non-missing values for an interval to be considered good
train_val_time = -1 # In real time days (-1 is for the full interval)
''' Step days will be fixed. That means that the datasets with longer base periods will have samples
that are more correlated. '''
step_days = 7 # market days
base_days = [7, 14, 28, 56, 112] # In market days
ahead_days = [1, 7, 14, 28, 56] # market days
datasets_params_list_df = pd.DataFrame([(x,y) for x in base_days for y in ahead_days],
columns=['base_days', 'ahead_days'])
datasets_params_list_df['train_val_time'] = train_val_time
datasets_params_list_df['step_days'] = step_days
datasets_params_list_df['GOOD_DATA_RATIO'] = GOOD_DATA_RATIO
datasets_params_list_df['SAMPLES_GOOD_DATA_RATIO'] = SAMPLES_GOOD_DATA_RATIO
datasets_params_list_df
def generate_one_set(params):
    """Build and pickle one (x, y) training dataset for a single
    (base_days, ahead_days) parameter combination.

    `params` is one row of the parameter DataFrame. The feature matrix `x`
    and target series `y` are written to ../../data/x_<pid>.pkl and
    ../../data/y_<pid>.pkl and also returned together with the id string
    `pid` ('base<B>_ahead<A>').
    """
    # print(('-'*70 + '\n {}, {} \n' + '-'*70).format(params['base_days'].values, params['ahead_days'].values))
    tic = time()
    # Cast the row entries explicitly -- they arrive as pandas/numpy scalars.
    train_val_time = int(params['train_val_time'])
    base_days = int(params['base_days'])
    step_days = int(params['step_days'])
    ahead_days = int(params['ahead_days'])
    print('Generating: base{}_ahead{}'.format(base_days, ahead_days))
    pid = 'base{}_ahead{}'.format(base_days, ahead_days)
    # Getting the data
    data_df = pd.read_pickle('../../data/data_train_val_df.pkl')
    today = data_df.index[-1] # Real date
    print(pid + ') data_df loaded')
    # Drop symbols with many missing points
    data_df = pp.drop_irrelevant_symbols(data_df, params['GOOD_DATA_RATIO'])
    print(pid + ') Irrelevant symbols dropped.')
    # Generate the intervals for the predictor
    x, y = fe.generate_train_intervals(data_df,
                                       train_val_time,
                                       base_days,
                                       step_days,
                                       ahead_days,
                                       today,
                                       fe.feature_close_one_to_one)
    print(pid + ') Intervals generated')
    # Drop "bad" samples and fill missing data.  x and y are concatenated
    # first so that sample filtering keeps features and target aligned.
    x_y_df = pd.concat([x, y], axis=1)
    x_y_df = pp.drop_irrelevant_samples(x_y_df, params['SAMPLES_GOOD_DATA_RATIO'])
    x = x_y_df.iloc[:, :-1]
    y = x_y_df.iloc[:, -1]
    x = pp.fill_missing(x)
    print(pid + ') Irrelevant samples dropped and missing data filled.')
    # Pickle that
    x.to_pickle('../../data/x_{}.pkl'.format(pid))
    y.to_pickle('../../data/y_{}.pkl'.format(pid))
    toc = time()
    print('%s) %i intervals generated in: %i seconds.' % (pid, x.shape[0], (toc-tic)))
    return pid, x, y
for ind in range(datasets_params_list_df.shape[0]):
pid, x, y = generate_one_set(datasets_params_list_df.iloc[ind,:])
datasets_params_list_df['x_filename'] = datasets_params_list_df.apply(lambda x:
'x_base{}_ahead{}.pkl'.format(int(x['base_days']),
int(x['ahead_days'])), axis=1)
datasets_params_list_df['y_filename'] = datasets_params_list_df.apply(lambda x:
'y_base{}_ahead{}.pkl'.format(int(x['base_days']),
int(x['ahead_days'])), axis=1)
datasets_params_list_df
datasets_params_list_df.to_pickle('../../data/datasets_params_list_df.pkl')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There is a special do nothing word
Step2: For
Step3: You can combine loops and conditionals
Step4: range()
Step6: <hr>
Step8: To call the function, use
Step9: If you don't provide an argument or too many, you get an error.
Step11: Arguments in Python are passed by object reference ("pass by assignment"). If you mutate a mutable argument (e.g. append to a list) inside the function, the caller sees the change; rebinding the parameter name to a new object, however, does not affect the caller's variable.
Step13: Variables have scope
Step14: In general, you want to use parameters to provide data to a function and return a result with the return. E.g.
Step15: Parameters have three different types
Step16: Take a minute and play around with the above function. Which are required? Keyword? Default?
Step17: Functions can contain any code that you put anywhere else including
Step18: Once you have some code that is functionalized and not going to change, you can move it to a file that ends in .py, check it into version control, import it into your notebook and use it!
|
<ASSISTANT_TASK:>
Python Code:
instructors = ['Dave', 'Jim', 'Dorkus the Clown']
if 'Dorkus the Clown' in instructors:
print('#fakeinstructor')
if 'Jim' in instructors:
print("Congratulations! Jim is teaching, your class won't stink!")
else:
pass
for instructor in instructors:
print(instructor)
for instructor in instructors:
if instructor.endswith('Clown'):
print(instructor + " doesn't sound like a real instructor name!")
else:
print(instructor + " is so smart... all those gooey brains!")
sum = 0
for i in range(10):
sum += i
print(sum)
def print_string(str):
    """This prints out a string passed as the parameter.

    Prints the whole string first, then its characters one per line,
    stopping after the first 'r' encountered (the 'r' itself is still
    printed), and finally prints 'done'.
    """
    # NOTE: the extraction had stripped the triple quotes around the
    # docstring above, which made this block a syntax error; restored here.
    # The parameter name shadows the built-in `str`, kept for interface
    # compatibility with existing keyword callers.
    print(str)
    for c in str:
        print(c)
        if c == 'r':
            break
    print("done")
    return
print_string("string")
print_string("Dave is awesome!")
#print_string()
def change_list(my_list):
    """This changes a passed list into this function.

    Appends 'four' to the (mutable) argument in place -- the caller's
    list is therefore modified too -- and prints the result.
    """
    # NOTE: the docstring's triple quotes had been stripped (syntax
    # error); restored.  Also dropped a stray trailing semicolon.
    my_list.append('four')
    print('list inside the function: ', my_list)
    return
my_list = [1, 2, 3];
print('list before the function: ', my_list)
change_list(my_list);
print('list after the function: ', my_list)
my_global_1 = 'bad idea'
my_global_2 = 'another bad one'
my_global_3 = 'better idea'
def my_function():
    """Demonstrate Python scoping rules for module-level globals.

    - my_global_1 is only read, so the module-level value is seen.
    - my_global_2 is assigned without a `global` declaration, which
      creates a *local* shadow; the module-level value stays untouched.
    - my_global_3 is declared `global`, so the assignment rebinds the
      module-level name.
    """
    print(my_global_1)  # plain read -> resolves to the global
    my_global_2 = 'broke your global, man!'  # local shadow, global unchanged
    print(my_global_2)
    global my_global_3  # opt in to rebinding the module-level name
    my_global_3 = 'still a better idea'
    return
my_function()
print(my_global_2)
print(my_global_3)
def a_function(parameter):
    """Accept one (unused) argument and return None explicitly --
    illustrates that a function with no meaningful result yields None."""
    return None
foo = a_function('bar')
print(foo)
def print_name(first, last='the Clown'):
    """Print a greeting built from a required first name and an
    optional last name (defaults to 'the Clown')."""
    full_greeting = 'Your name is %s %s' % (first, last)
    print(full_greeting)
    return
def massive_correlation_analysis(data, method='pearson'):
    """Placeholder stub illustrating a default keyword parameter
    (method='pearson'); intentionally does nothing yet."""
    pass
    return
def print_name_age(first, last, age):
    """Print the name (delegating to print_name) and the age, then a
    teasing line when age > 35.

    The age is printed twice on purpose, demonstrating two equivalent
    formatting styles: %-interpolation and concatenation with str().
    """
    print_name(first, last)
    print('Your age is %d' % (age))
    print('Your age is ' + str(age))
    if age > 35:
        print('You are really old.')
    return
print_name_age(age=40, last='Beck', first='Dave')
def download_if_not_exists(url, filename):
    """Download `url` to `filename` unless the file already exists locally.

    Raises RuntimeError when the server does not answer with HTTP 200,
    so a failed download never writes a bogus file to disk.
    """
    # Guard clause instead of the original `if exists: pass / else:` shape.
    if os.path.exists(filename):
        return  # already cached; nothing to do
    req = requests.get(url)
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, which would have let an error page be written to disk.
    if req.status_code != 200:
        raise RuntimeError('download of %s failed with HTTP %s' % (url, req.status_code))
    with open(filename, 'wb') as f:
        f.write(req.content)
def load_HCEPDB_data(url, zip_filename, csv_filename):
    """Fetch the HCEPDB zip archive (if not already cached) and load one
    CSV member from it as a pandas DataFrame.
    """
    download_if_not_exists(url, zip_filename)
    # Context managers close the archive and member handles deterministically
    # (the original left the ZipFile open for the interpreter to collect).
    with zipfile.ZipFile(zip_filename) as zf:
        with zf.open(csv_filename) as member:
            data = pd.read_csv(member)
    return data
import os
import requests
import pandas as pd
import zipfile
load_HCEPDB_data('http://faculty.washington.edu/dacb/HCEPDB_moldata_set1.zip', 'HCEPDB_moldata_set1.zip', 'HCEPDB_moldata_set1.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We begin with def then a space followed by the name of the function. Try to keep names relevant, for example len() is a good name for a length() function. Also be careful with names, you wouldn't want to call a function the same name as a built-in function in Python (such as len).
Step2: Call the function
Step3: Example 2
Step4: Using return
Step5: What happens if we input two strings?
Step6: Note that because we don't declare variable types in Python, this function could be used to add numbers or sequences together! We'll later learn about adding in checks to make sure a user puts in the correct arguments into a function.
Step7: Note how we break out of the loop right after the print statement! We can improve this by checking divisors only up to the square root of the target number, and by skipping all even numbers after checking for 2. We'll also switch to returning a boolean value, to give an example of using return statements.
|
<ASSISTANT_TASK:>
Python Code:
def name_of_function(arg1,arg2):
'''
This is where the function's Document String (docstring) goes
'''
# Do stuff here
#return desired result
def say_hello():
    # Prints a fixed greeting.
    # NOTE(review): Python 2 print statement -- this tutorial cell is not
    # valid Python 3 syntax.
    print 'hello'
say_hello()
def greeting(name):
    # Greets by name via %-interpolation (Python 2 print statement --
    # not valid Python 3 syntax).
    print 'Hello %s' %name
greeting('Jose')
def add_num(num1,num2):
    """Return num1 + num2 -- works for any pair of types that support
    the `+` operator (numbers add, sequences concatenate)."""
    total = num1 + num2
    return total
# Can also save as variable due to return
result = add_num(4,5)
print result
print add_num('one','two')
def is_prime(num):
    '''
    Naive method of checking for primes.

    Trial-divides by every n in [2, num) and prints the verdict instead
    of returning it; the for/else runs the else branch only when the loop
    finished without `break`.
    NOTE(review): Python 2 print statements (not valid Python 3), and for
    num < 2 the loop body never runs, so 'prime' is wrongly printed for
    0 and 1 -- the second implementation in this file addresses this.
    '''
    for n in range(2,num):
        if num % n == 0:
            print 'not prime'
            break
    else: # If never mod zero, then prime
        print 'prime'
is_prime(16)
import math
def is_prime(num):
    '''
    Better method of checking for primes.

    Returns a boolean.  Handles num < 2 (0, 1 and negatives are not
    prime), rejects even numbers > 2 immediately, and otherwise only
    trial-divides by odd numbers up to sqrt(num).
    '''
    if num < 2:
        # 0, 1 and negatives are not prime.  The original fell through to
        # `return True` here, and math.sqrt raised on negative input.
        return False
    if num % 2 == 0 and num > 2:
        return False
    for i in range(3, int(math.sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True
is_prime(14)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step6: We start by defining a series helper functions which we will use in creating the plot below.
Step7: Finally we can plot the actual figure.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Ellipse, FancyArrow, Rectangle
from matplotlib.pyplot import cm
%matplotlib inline
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Return a new colormap obtained from `cmap` by extracting
    the slice between `minval` and `maxval` (using `n` values)."""
    # NOTE: the docstring's triple quotes had been stripped by extraction,
    # which made the bare text a syntax error; restored (and 'betwen' fixed).
    new_cmap = colors.LinearSegmentedColormap.from_list(
        'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
        cmap(np.linspace(minval, maxval, n)))
    return new_cmap
def draw_normalised_dipole_field(ax, dipole_x, dipole_y, xmin, xmax, nx, ymin, ymax, ny):
    """Draw arrows representing the dipole field created
    by a dipole located at (dipole_x, dipole_y).

    The arrows are placed on a grid with the bounds
    (xmin, xmax, ymin, ymax) and with nx and ny
    subdivisions along the x- and y-axis, respectively.
    """
    # NOTE: restored the triple quotes stripped from the docstring above
    # (the bare text was a syntax error).
    Y, X = np.mgrid[ymin:ymax:ny*1j, xmin:xmax:nx*1j]
    # Dipole moment points along +y everywhere.
    MX = np.zeros_like(X)
    MY = np.ones_like(Y)
    RX = X - dipole_x
    RY = Y - dipole_y
    r = np.sqrt((X - dipole_x)**2 + (Y - dipole_y)**2)
    mdotr = MX * RX + MY * RY
    # Point-dipole field: B ~ 3(m.r)r/r^5 - m/r^3.
    U = 3*mdotr*RX/r**5 - MX/r**3
    V = 3*mdotr*RY/r**5 - MY/r**3
    # Normalise to unit arrows; the colour (below) carries the distance.
    speed = np.sqrt(U**2 + V**2)
    UN = U/speed
    VN = V/speed
    cmap = truncate_colormap(cm.inferno_r, 0.15, 1.0)
    ax.quiver(X, Y, UN, VN,  # data
              r,             # colour the arrows based on this array
              scale=40, width=0.0030,
              pivot='middle', cmap=cmap)
def draw_vertical_line(ax, x, ymin, ymax, annotation, color):
    """Add vertical line to a matplotlib axes, including an annotation.

    Arguments:
        ax: matplotlib Axes instance to which the line is to be added.
        x: x-position of the line
        ymin, ymax: vertical extent of the line
        annotation: Text to be added at the bottom of the line.
        color: Color of the line and annotation.
    """
    # NOTE: restored the triple quotes stripped from the docstring above
    # (the bare text was a syntax error).
    linewidth=2.0
    linestyle='-'
    # Extend the line 3 units beyond the requested extent on both ends.
    ax.plot((x, x), (ymin - 3, ymax + 3), linewidth=linewidth, linestyle=linestyle, color=color)
    ax.annotate(annotation, xy=(x, ymin), xytext=(0, -20),
                ha='center', va='top', color=color,
                rotation=0, fontsize=20,
                xycoords='data', textcoords='offset points')
def draw_particle(ax, x=0, y=10, diameter=20, color='#aaeeff', arrow_color='blue'):
    """Draw a circle representing the nanoparticle as well as
    an arrow indicating its magnetization.
    """
    # NOTE: restored the triple quotes stripped from the docstring above
    # (the bare text was a syntax error).
    # Arrow is 60% of the particle diameter, centred vertically on (x, y).
    arrow_length = 0.6 * diameter
    arrow_bottom = y - 0.5*arrow_length
    particle = Ellipse((x, y), diameter, diameter, facecolor=color)
    m_particle = FancyArrow(x, arrow_bottom, 0, arrow_length,
                            color=arrow_color, linewidth=5,
                            length_includes_head=True, head_width=3, head_length=4)
    ax.add_patch(particle)
    ax.add_patch(m_particle)
def draw_nanodisc(ax, x=0, y=-70, width=150, height=10, thickness=2, color='white'):
    """Draw the nanodisc as two ellipses with given
    width and height, centered at (x, y).
    """
    # NOTE: restored the triple quotes stripped from the docstring above
    # (the bare text was a syntax error).
    y_top = y
    y_bottom = y - thickness
    ellipse_top = Ellipse((x, y_top), width, height, angle=0.0, facecolor=color)
    ellipse_bottom = Ellipse((x, y_bottom), width, height, angle=0.0, facecolor=color)
    rect = Rectangle((-0.5*width, y_bottom), width, thickness, facecolor=color, edgecolor='none')
    # Draw the bottom ellipse first, then the rectangle
    # to cover the upper half of it and then draw the top
    # ellipse. Note that this is not strictly necessary
    # for a white ellipse but it
    ax.add_patch(ellipse_bottom)
    ax.add_patch(rect)
    ax.add_patch(ellipse_top)
    # Draw the sides connecting the top and bottom ellipse.
    # For some reason it looks better if we shift them to
    # the left by a tiny amount.
    xshift = -0.1
    xleft = -0.5 * width + xshift
    xright = +0.5 * width + xshift
    ax.plot([xleft, xleft], [y_bottom, y_top], color='black', linewidth=1)
    ax.plot([xright, xright], [y_bottom, y_top], color='black', linewidth=1)
# Position of dipole (shown as cyan-coloured particle below)
dipole_x, dipole_y = 0, 10
xmin, xmax = -80, 80
ymin, ymax = -82, 25
xmin_fld, xmax_fld = -75, 75
ymin_fld, ymax_fld = -50, -5
nx_fld, ny_fld = 40, 16
plt.style.use('style_sheets/fig2.mplstyle')
fig, ax = plt.subplots(figsize=(12, 7))
# Tweak appearance of the axis spines etc.
ax.set_aspect('equal')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
xticks = [-50, 0, 50]
yticks = [0, -10, -20, -30, -40, -50]
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_yticklabels([str(-y) for y in yticks])
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_bounds(-55, 5)
ax.set_xlabel('Lateral offset from particle centre (nm)', labelpad=20)
ax.set_ylabel(r'Vertical separation $d$ (nm)', labelpad=20, y=0.62)
# Plot particle, nanodisc, dipole field and vertical lines
draw_particle(ax, x=dipole_x, y=dipole_y, color='#aaeeff')
draw_nanodisc(ax, color='white')
draw_normalised_dipole_field(ax, dipole_x, dipole_y,
xmin_fld, xmax_fld, nx_fld,
ymin_fld, ymax_fld, ny_fld)
draw_vertical_line(ax, x= 0, ymin=ymin_fld, ymax=ymax_fld, annotation='N=1', color='darkblue')
draw_vertical_line(ax, x=-41, ymin=ymin_fld, ymax=ymax_fld, annotation='N=2', color='red')
draw_vertical_line(ax, x=+41, ymin=ymin_fld, ymax=ymax_fld, annotation='N=2', color='red')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Environment
Step3: Try out Environment
Step4: Train model
Step5: Visualizing Results
Step6: Enjoy model
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/openai/baselines >/dev/null
!pip install gym >/dev/null
import numpy as np
import random
import gym
from gym.utils import seeding
from gym import spaces
def state_name_to_int(state):
    """Translate a location name ('S' plus customers 'A'..'H' and
    'K'..'O' -- note 'I' and 'J' are skipped) into its index 0..13.

    Raises KeyError for an unknown name.
    """
    ordered_names = ['S', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
                     'H', 'K', 'L', 'M', 'N', 'O']
    lookup = {name: index for index, name in enumerate(ordered_names)}
    return lookup[state]
def int_to_state_name(state_as_int):
    """Inverse of state_name_to_int: map an index 0..13 back to its
    location name ('S', 'A'..'H', 'K'..'O').

    Raises KeyError for an index outside 0..13.
    """
    ordered_names = ['S', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
                     'H', 'K', 'L', 'M', 'N', 'O']
    lookup = {index: name for index, name in enumerate(ordered_names)}
    return lookup[state_as_int]
class BeraterEnv(gym.Env):
    """
    The Berater Problem

    A consultant starts at 'S', should visit the customers that carry a
    reward this episode (modulate_reward gives 1000 to a random subset of
    roughly half the customers) and return to 'S'.  Travelling an edge
    costs its weight; step rewards are (customer reward - cost) divided
    by self.optimum (the total customer reward of the first episode).

    Actions:
    There are 4 discrete deterministic actions, each choosing one direction
    (an outgoing edge of the current node; choosing a non-existing edge
    keeps the position and costs a 1000 penalty).
    """
    metadata = {'render.modes': ['ansi']}
    # Class-level console-logging toggles.
    showStep = False
    showDone = True
    envEpisodeModulo = 100  # averaging window for the "Done" summary line

    def __init__(self):
        # self.map = {
        #     'S': [('A', 100), ('B', 400), ('C', 200 )],
        #     'A': [('B', 250), ('C', 400), ('S', 100 )],
        #     'B': [('A', 250), ('C', 250), ('S', 400 )],
        #     'C': [('A', 400), ('B', 250), ('S', 200 )]
        # }
        # Adjacency list: node -> [(neighbour, travel cost), ...].
        self.map = {
            'S': [('A', 300), ('B', 100), ('C', 200 )],
            'A': [('S', 300), ('B', 100), ('E', 100 ), ('D', 100 )],
            'B': [('S', 100), ('A', 100), ('C', 50 ), ('K', 200 )],
            'C': [('S', 200), ('B', 50), ('M', 100 ), ('L', 200 )],
            'D': [('A', 100), ('F', 50)],
            'E': [('A', 100), ('F', 100), ('H', 100)],
            'F': [('D', 50), ('E', 100), ('G', 200)],
            'G': [('F', 200), ('O', 300)],
            'H': [('E', 100), ('K', 300)],
            'K': [('B', 200), ('H', 300)],
            'L': [('C', 200), ('M', 50)],
            'M': [('C', 100), ('L', 50), ('N', 100)],
            'N': [('M', 100), ('O', 100)],
            'O': [('N', 100), ('G', 300)]
        }
        max_paths = 4  # no node in the map has more than 4 outgoing edges
        self.action_space = spaces.Discrete(max_paths)
        positions = len(self.map)
        # observations: position, reward of all 4 local paths, rest reward of all locations
        # non existing path is -1000 and no position change
        # look at what #getObservation returns if you are confused
        low = np.append(np.append([0], np.full(max_paths, -1000)), np.full(positions, 0))
        high = np.append(np.append([positions - 1], np.full(max_paths, 1000)), np.full(positions, 1000))
        self.observation_space = spaces.Box(low=low,
                                            high=high,
                                            dtype=np.float32)
        self.reward_range = (-1, 1)
        # Per-episode bookkeeping.
        self.totalReward = 0
        self.stepCount = 0
        self.isDone = False
        # Across-episode bookkeeping for the periodic summary printout.
        self.envReward = 0
        self.envEpisodeCount = 0
        self.envStepCount = 0
        self.reset()
        # Total claimable customer reward (of the first randomised episode);
        # fixed afterwards and used to normalise all step rewards.
        self.optimum = self.calculate_customers_reward()

    def seed(self, seed=None):
        """Standard gym seeding hook."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def iterate_path(self, state, action):
        """Return (destination, cost) for taking `action` from `state`."""
        paths = self.map[state]
        if action < len(paths):
            return paths[action]
        else:
            # sorry, no such action, stay where you are and pay a high penalty
            return (state, 1000)

    def step(self, action):
        """Gym step: move along the chosen edge, collect the destination's
        reward (once), and return (observation, reward, done, info)."""
        destination, cost = self.iterate_path(self.state, action)
        lastState = self.state
        customerReward = self.customer_reward[destination]
        # Normalised so rewards stay roughly within self.reward_range.
        reward = (customerReward - cost) / self.optimum
        self.state = destination
        self.customer_visited(destination)
        # Episode ends when back at 'S' with every customer reward collected.
        done = destination == 'S' and self.all_customers_visited()
        stateAsInt = state_name_to_int(self.state)
        self.totalReward += reward
        self.stepCount += 1
        self.envReward += reward
        self.envStepCount += 1
        if self.showStep:
            print( "Episode: " + ("%4.0f " % self.envEpisodeCount) +
                   " Step: " + ("%4.0f " % self.stepCount) +
                   lastState + ' --' + str(action) + '-> ' + self.state +
                   ' R=' + ("% 2.2f" % reward) + ' totalR=' + ("% 3.2f" % self.totalReward) +
                   ' cost=' + ("%4.0f" % cost) + ' customerR=' + ("%4.0f" % customerReward) + ' optimum=' + ("%4.0f" % self.optimum)
                   )
        if done and not self.isDone:
            self.envEpisodeCount += 1
            if BeraterEnv.showDone:
                # Average over the current (possibly partial) window.
                episodes = BeraterEnv.envEpisodeModulo
                if (self.envEpisodeCount % BeraterEnv.envEpisodeModulo != 0):
                    episodes = self.envEpisodeCount % BeraterEnv.envEpisodeModulo
                print( "Done: " +
                       ("episodes=%6.0f " % self.envEpisodeCount) +
                       ("avgSteps=%6.2f " % (self.envStepCount/episodes)) +
                       ("avgTotalReward=% 3.2f" % (self.envReward/episodes) )
                       )
            if (self.envEpisodeCount%BeraterEnv.envEpisodeModulo) == 0:
                # Start a fresh averaging window.
                self.envReward = 0
                self.envStepCount = 0
        self.isDone = done
        observation = self.getObservation(stateAsInt)
        # NOTE(review): self.state was already updated to `destination`
        # above, so info["from"] always equals info["to"] here; `lastState`
        # was probably intended -- confirm before relying on `info`.
        info = {"from": self.state, "to": destination}
        return observation, reward, done, info

    def getObservation(self, position):
        """Observation vector: [position, value of the 4 local paths
        (reward - cost, or -1000 for missing edges), remaining reward of
        every location]."""
        result = np.array([ position,
                            self.getPathObservation(position, 0),
                            self.getPathObservation(position, 1),
                            self.getPathObservation(position, 2),
                            self.getPathObservation(position, 3)
                          ],
                         dtype=np.float32)
        all_rest_rewards = list(self.customer_reward.values())
        result = np.append(result, all_rest_rewards)
        return result

    def getPathObservation(self, position, path):
        """Value (remaining reward minus travel cost) of taking edge number
        `path` from the current state; -1000 for a non-existing edge."""
        source = int_to_state_name(position)
        # NOTE(review): `source` is unused -- the lookup below uses
        # self.state instead of `position`.  This works because callers
        # always pass the current position, but confirm the intent.
        paths = self.map[self.state]
        if path < len(paths):
            target, cost = paths[path]
            reward = self.customer_reward[target]
            result = reward - cost
        else:
            result = -1000
        return result

    def customer_visited(self, customer):
        """Zero out a customer's reward once it has been collected."""
        self.customer_reward[customer] = 0

    def all_customers_visited(self):
        """True when no claimable reward is left anywhere."""
        return self.calculate_customers_reward() == 0

    def calculate_customers_reward(self):
        """Sum of the rewards still claimable this episode."""
        sum = 0  # NOTE: shadows the built-in sum() inside this method
        for value in self.customer_reward.values():
            sum += value
        return sum

    def modulate_reward(self):
        """Randomly assign a reward of 1000 to number_of_customers // 2
        customers for the coming episode; everything else (incl. 'S')
        gets 0."""
        number_of_customers = len(self.map) - 1
        number_per_consultant = int(number_of_customers/2)
        # number_per_consultant = int(number_of_customers/1.5)
        self.customer_reward = {
            'S': 0
        }
        for customer_nr in range(1, number_of_customers + 1):
            self.customer_reward[int_to_state_name(customer_nr)] = 0
        # every consultant only visits a few random customers
        samples = random.sample(range(1, number_of_customers + 1), k=number_per_consultant)
        key_list = list(self.customer_reward.keys())
        for sample in samples:
            self.customer_reward[key_list[sample]] = 1000

    def reset(self):
        """Start a new episode at 'S' with freshly randomised rewards;
        returns the initial observation."""
        self.totalReward = 0
        self.stepCount = 0
        self.isDone = False
        self.modulate_reward()
        self.state = 'S'
        return self.getObservation(state_name_to_int(self.state))

    def render(self):
        """'ansi'-style rendering: print the remaining customer rewards."""
        print(self.customer_reward)
env = BeraterEnv()
print(env.reset())
print(env.customer_reward)
BeraterEnv.showStep = True
BeraterEnv.showDone = True
env = BeraterEnv()
print(env)
observation = env.reset()
print(observation)
for t in range(1000):
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.close()
print(observation)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
!rm -r logs
!mkdir logs
!mkdir logs/berater
# https://github.com/openai/baselines/blob/master/baselines/deepq/experiments/train_pong.py
# log_dir = logger.get_dir()
log_dir = '/content/logs/berater/'
import gym
from baselines import bench
from baselines import logger
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.vec_monitor import VecMonitor
from baselines.ppo2 import ppo2
BeraterEnv.showStep = False
BeraterEnv.showDone = False
env = BeraterEnv()
wrapped_env = DummyVecEnv([lambda: BeraterEnv()])
monitored_env = VecMonitor(wrapped_env, log_dir)
# https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py
# https://github.com/openai/baselines/blob/master/baselines/common/models.py#L30
%time model = ppo2.learn(\
env=monitored_env,\
network='mlp',\
num_hidden=5000,\
num_layers=3,\
ent_coef=0.01,\
total_timesteps=500000)
# %time model = ppo2.learn(\
# env=monitored_env,\
# network='mlp',\
# num_hidden=2000,\
# num_layers=3,\
# ent_coef=0.1,\
# total_timesteps=500000)
# model = ppo2.learn(
# env=monitored_env,\
# layer_norm=True,\
# network='mlp',\
# num_hidden=2000,\
# activation=tf.nn.relu,\
# num_layers=3,\
# ent_coef=0.03,\
# total_timesteps=1000000)
# monitored_env = bench.Monitor(env, log_dir)
# https://en.wikipedia.org/wiki/Q-learning#Influence_of_variables
# %time model = deepq.learn(\
# monitored_env,\
# seed=42,\
# network='mlp',\
# lr=1e-3,\
# gamma=0.99,\
# total_timesteps=30000,\
# buffer_size=50000,\
# exploration_fraction=0.5,\
# exploration_final_eps=0.02,\
# print_freq=1000)
model.save('berater-ppo-v7.pkl')
monitored_env.close()
# !ls -l $log_dir
from baselines.common import plot_util as pu
results = pu.load_results(log_dir)
import matplotlib.pyplot as plt
import numpy as np
r = results[0]
plt.ylim(0, .75)
# plt.plot(np.cumsum(r.monitor.l), r.monitor.r)
plt.plot(np.cumsum(r.monitor.l), pu.smooth(r.monitor.r, radius=100))
import numpy as np
observation = env.reset()
env.render()
state = np.zeros((1, 2*128))
dones = np.zeros((1))
BeraterEnv.showStep = True
BeraterEnv.showDone = False
for t in range(1000):
actions, _, state, _ = model.step(observation, S=state, M=dones)
observation, reward, done, info = env.step(actions[0])
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Model
Step3: Session
Step4: Visualization
Step5: Test translation from the dataset
Step6: Visualize Custom Sentence
Step7: Interpreting the Visualizations
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
import numpy as np
from DLT2T.utils import trainer_utils as utils
from DLT2T.visualization import attention
%%javascript
require.config({
paths: {
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.8/d3.min'
}
});
import os
# PUT THE MODEL YOU WANT TO LOAD HERE!
PROBLEM = 'translate_ende_wmt32k'
MODEL = 'transformer'
HPARAMS = 'transformer_base_single_gpu'
DATA_DIR=os.path.expanduser('~/t2t_data')
TRAIN_DIR=os.path.expanduser('~/t2t_train/%s/%s-%s' % (PROBLEM, MODEL, HPARAMS))
print(TRAIN_DIR)
FLAGS = tf.flags.FLAGS
FLAGS.problems = PROBLEM
FLAGS.hparams_set = HPARAMS
FLAGS.data_dir = DATA_DIR
FLAGS.model = MODEL
hparams = utils.create_hparams(HPARAMS, PROBLEM, DATA_DIR)
# SET EXTRA HYPER PARAMS HERE!
# e.g.
# hparams.batch_size = 1024
num_datashards = utils.devices.data_parallelism().n
input_fn = utils.input_fn_builder.build_input_fn(
mode=tf.estimator.ModeKeys.EVAL,
hparams=hparams,
data_dir=DATA_DIR,
num_datashards=num_datashards)
inputs, target = input_fn()
features = inputs
features['targets'] = target
def encode(string):
    """Subtoken-encode `string` with the problem's input vocabulary and
    append EOS (id 1) plus one PAD (id 0); returned as a batch of one.

    Relies on the notebook-global `hparams` being set up.
    """
    subtokenizer = hparams.problems[0].vocabulary['inputs']
    return [subtokenizer.encode(string) + [1] + [0]]
def decode(ids):
    """Decode an array of subtoken ids back into a string using the
    target-side vocabulary (size-1 dimensions are squeezed away first).

    Relies on the notebook-global `hparams` being set up.
    """
    return hparams.problems[0].vocabulary['targets'].decode(np.squeeze(ids))
def to_tokens(ids):
    """Map an array of subtoken ids to their string representations.

    Id 0 renders as '<PAD>' and id 1 as '<EOS>'; every other id is looked
    up in the target-side subtokenizer.  NOTE(review): this calls the
    private T2T method _subtoken_id_to_subtoken_string -- may break
    across library versions.
    """
    ids = np.squeeze(ids)
    subtokenizer = hparams.problems[0].vocabulary['targets']
    tokens = []
    for _id in ids:
        if _id == 0:
            tokens.append('<PAD>')
        elif _id == 1:
            tokens.append('<EOS>')
        else:
            tokens.append(subtokenizer._subtoken_id_to_subtoken_string(_id))
    return tokens
spec = utils.model_builder.model_fn(MODEL, features, tf.estimator.ModeKeys.EVAL, hparams, problem_names=[PROBLEM])
predictions_dict = spec.predictions
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
spec = utils.model_builder.model_fn(MODEL, features, tf.estimator.ModeKeys.PREDICT, hparams, problem_names=[PROBLEM])
beam_out = spec.predictions['outputs']
sv = tf.train.Supervisor(
logdir=TRAIN_DIR,
global_step=tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step'))
sess = sv.PrepareSession(config=tf.ConfigProto(allow_soft_placement=True))
sv.StartQueueRunners(
sess,
tf.get_default_graph().get_collection(tf.GraphKeys.QUEUE_RUNNERS))
# Get the attention tensors from the graph.
# This need to be done using the training graph since the inference uses a tf.while_loop
# and you cant fetch tensors from inside a while_loop.
enc_atts = []
dec_atts = []
encdec_atts = []
for i in range(hparams.num_hidden_layers):
enc_att = tf.get_default_graph().get_operation_by_name(
"body/model/parallel_0/body/encoder/layer_%i/self_attention/multihead_attention/dot_product_attention/attention_weights" % i).values()[0]
dec_att = tf.get_default_graph().get_operation_by_name(
"body/model/parallel_0/body/decoder/layer_%i/self_attention/multihead_attention/dot_product_attention/attention_weights" % i).values()[0]
encdec_att = tf.get_default_graph().get_operation_by_name(
"body/model/parallel_0/body/decoder/layer_%i/encdec_attention/multihead_attention/dot_product_attention/attention_weights" % i).values()[0]
enc_atts.append(enc_att)
dec_atts.append(dec_att)
encdec_atts.append(encdec_att)
inp, out, logits = sess.run([inputs['inputs'], target, predictions_dict['predictions']])
print("Input: ", decode(inp[0]))
print("Gold: ", decode(out[0]))
logits = np.squeeze(logits[0])
tokens = np.argmax(logits, axis=1)
print("Gold out: ", decode(tokens))
eng = "I have three dogs."
inp_ids = encode(eng)
beam_decode = sess.run(beam_out, {
inputs['inputs']: np.expand_dims(np.expand_dims(inp_ids, axis=2), axis=3),
})
trans = decode(beam_decode[0])
print(trans)
output_ids = beam_decode
# Get attentions
np_enc_atts, np_dec_atts, np_encdec_atts = sess.run([enc_atts, dec_atts, encdec_atts], {
inputs['inputs']: np.expand_dims(np.expand_dims(inp_ids, axis=2), axis=3),
target: np.expand_dims(np.expand_dims(output_ids, axis=2), axis=3),
})
%%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
inp_text = to_tokens(inp_ids)
out_text = to_tokens(output_ids)
attention.show(inp_text, out_text, np_enc_atts, np_dec_atts, np_encdec_atts)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a new dictionary with the following structure and then export as a json object
|
<ASSISTANT_TASK:>
Python Code:
#Find a list of users with at least 20 reviews
user_list = []
for user in users.find():
if user['review_count'] >= 20:
user_list.append(user['_id'])
else:
pass
user_reviews = dict.fromkeys(user_list, 0)
for review in reviews.find():
try:
if user_reviews[review['_id']] == 0:
print review['_id']
print review
break
except KeyError:
pass
# user_reviews[review['_id']] = [review]
# else:
# user_reviews[review['_id']].append(review)
# except KeyError:
# pass
user_reviews[user_reviews.keys()[23]]
filtered_reviews = {}
for user in user_reviews.keys():
if user_reviews[user] != 0:
filtered_reviews[user] = user_reviews[user]
#We have this many users after our filtering
len(filtered_reviews)
#Dump file of cleaned up user data
with open('merged_user_reviews.json', 'w') as fp:
json.dump(user_reviews, fp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Indentation
Step2: You can also add an else statement (and a colon) with an indented block of code you want to run if the condition resolves to False.
Step3: If you need to, you can add multiple conditions with elif.
Step4: for loops
|
<ASSISTANT_TASK:>
Python Code:
# import the csv module from the Python standard library
# https://docs.python.org/3/library/csv.html
import csv
# import the BeautifulSoup class from the (external) bs4 package
from bs4 import BeautifulSoup
# import variables from a local file, my_module.py
# alias to `mm` using the `as` keyword
import my_module as mm
# access bits of the module with a period .
print(mm.charlie)
if 4 < 6:
print('4 is less than 6')
if 4 > 6:
print('4 is greater than 6?!')
else:
print('4 is not greater than 6.')
HOME_SCORE = 6
AWAY_SCORE = 8
if HOME_SCORE > AWAY_SCORE:
print('we won!')
elif HOME_SCORE == AWAY_SCORE:
print('we tied!')
else:
print('we lost!')
# let's import a list of ingredients for pico de gallo
from my_module import pico_ingredients as pi
# print the list to see what's there
print(pi)
# now use a `for` loop to iterate over the list and print each item
# ("ingredient" is an arbitrary variable name)
for ingredient in pi:
print(ingredient)
# you can also iterate over a dictionary
# let's import our puppy
from my_module import charlie
# the iterable element in a dictionary is the key
# so I like to use that as my variable
# --> remember! order is not guaranteed in a dict
for key in charlie:
print(key, charlie[key])
# lots of things are iterable -- you can even loop over the characters in a string!
name = 'Cody Winchester'
for letter in name:
print(letter)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to Derivatives
Step2: Derivative of $f$
Step3: Chain rule of differentiation
Step4: Enter Theano
Step5: Exercise
Step6: Multivariate Functions
Step7: Introducing Partial Derivatives
Step8: Enter theano.gradient.jacobian
Step9: Exercise
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from theano import tensor as T
from theano import function
from theano.gradient import jacobian
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = 8, 6
plt.style.use('ggplot')
%matplotlib inline
xx = np.linspace(0, 100, 100)
yy = 2 * xx
plt.plot(xx, yy)
plt.vlines(70, 0, yy[70], linestyles="dashed", colors="g")
plt.vlines(85, 0, yy[85], linestyles="dashed", colors="g")
plt.hlines(yy[70], 0, 70, linestyles="dashed", colors="g")
plt.hlines(yy[85], 0, 85, linestyles="dashed", colors="g")
plt.xticks([70, 85], [r"$a$", r"$a + \Delta a$"], fontsize=12, color="k")
plt.yticks([yy[70], yy[85]], [r"$f(a)$", r"$f(a + \Delta a)$"], fontsize=12, color="k")
plt.xlabel(r'$x$', fontsize=16, color="k")
plt.ylabel(r'$f(x)$', fontsize=16, color="k")
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
x = np.linspace(-6, 6, 100)
f = sigmoid(x)
plt.plot(x, f)
plt.xlabel(r'$x$', fontsize=20, color="k")
plt.ylabel(r'$\frac{1}{1 + e^{-x}}$', fontsize=20, color="k")
x = np.linspace(-6, 6, 100)
f = sigmoid(x)
df_dx = f * (1 - f)
plt.plot(x, f, label=r'$f(x)$')
plt.plot(x, df_dx, label=r'$\frac{df}{dx}$')
plt.xlabel(r'$x$', fontsize=20, color="k")
plt.legend()
x = T.dvector('x')
f = 1 / (1 + T.exp(-x))
df_dx = T.grad(f.sum(), wrt=[x])
sigmoid = function([x], f)
d_sigmoid = function([x], df_dx)
xx = np.linspace(-6, 6, 100)
plt.plot(xx, sigmoid(xx), label=r'$f(x)$')
plt.plot(xx, d_sigmoid(xx)[0], label=r'$\frac{df}{dx}$')
plt.xlabel(r'$x$', fontsize=20, color="k")
plt.legend()
# enter code here
xx = np.linspace(0, 10, 100)
yy = np.linspace(0, 10, 100)
X, Y = np.meshgrid(xx, yy)
f = Y * (X ** 2 + 1)
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, f, cmap=plt.cm.coolwarm)
ax.set_zticklabels([])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_xlabel(r'$x$', fontsize=16, color="k")
ax.set_ylabel(r'$y$', fontsize=16, color="k")
ax.set_zlabel(r'$x^{2}y + y$', fontsize=20, color="k", labelpad=0)
ax.autoscale_view()
plt.tight_layout()
del_f = np.c_[2 * xx * yy, (xx ** 2) + 1].T
print(del_f)
x = T.dvector('x')
y = T.dvector('y')
f = (x ** 2) * y + y
delf = jacobian(f.sum(), wrt=[x, y])
get_jacobian = function([x, y], delf)
J1 = np.c_[get_jacobian(xx, yy)]
print(np.allclose(J1, del_f))
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot(xx, yy, J1[0, :], label=r"$2xy$")
ax.plot(xx, yy, J1[1, :], label=r"$x^{2} + 1$")
ax.legend()
# enter code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Defining the elements
Step2: Running the experiment
Step3: Lets run only 10 generations
Step4: Essential features
Step5: The first individual can now be built
Step6: Printing the individual ind1 and checking if its fitness is valid will give something like this
Step7: The individual is printed as its base class representation (here a list) and the fitness is invalid because it contains no values.
Step8: Dealing with single objective fitness is not different, the evaluation function must return a tuple because single-objective is treated as a special case of multi-objective.
Step9: The fitness’ values are deleted because they not related to the individual anymore. As stated above, the mutation does mutate and only mutate an individual it is not responsible of invalidating the fitness nor anything else. The following shows that ind2 and mutant are in fact the same individual.
Step10: Crossover
Step11: Selection
Step12: Using the Toolbox
Step13: Tool Decoration
Step14: This will work on crossover and mutation because both return a tuple of individuals. The mutation is often considered to return a single individual but again like for the evaluation, the single individual case is a special case of the multiple individual case.
Step15: Algorithms
Step16: Computing Statistics
Step17: The statistics object is created using a key as first argument. This key must be supplied a function that will later be applied to the data on which the statistics are computed. The previous code sample uses the fitness.values attribute of each element.
Step18: The statistical functions are now registered.
Step19: Statistics will automatically be computed on the population every generation.
Step20: The argument to the compile function must be an iterable of elements on which the key will be called. Here, our population (pop) contains individuals.
Step21: Logging Data
Step22: The record() method takes a variable number of argument, each of which is a data to be recorded. In the last example, we saved the generation, the number of evaluations and everything contained in the record produced by a statistics object using the star magic. All record will be kept in the logbook until its destruction.
Step23: The select() method provides a way to retrieve all the information associated with a keyword in all records. This method takes a variable number of string arguments, which are the keywords used in the record or statistics object. Here, we retrieved the generation and the average fitness using a single call to select.
Step24: The result is
Step25: Plotting Features
Step29: <img src='http
|
<ASSISTANT_TASK:>
Python Code:
import random
from deap import algorithms, base, creator, tools
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
def evalOneMax(individual):
return (sum(individual),)
toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attr_bool, n=100)
toolbox.register("population", tools.initRepeat, list,
toolbox.individual)
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
pop = toolbox.population(n=300)
result = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2,
ngen=10, verbose=False)
print('Current best fitness:', evalOneMax(tools.selBest(pop, k=1)[0]))
result = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2,
ngen=50, verbose=False)
print('Current best fitness:', evalOneMax(tools.selBest(pop, k=1)[0]))
import random
from deap import base
from deap import creator
from deap import tools
IND_SIZE = 5
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox1 = base.Toolbox()
toolbox1.register("attr_float", random.random)
toolbox1.register("individual", tools.initRepeat, creator.Individual,
toolbox1.attr_float, n=IND_SIZE)
ind1 = toolbox1.individual()
print ind1
print ind1.fitness.valid
def evaluate(individual):
# Do some hard computing on the individual
a = sum(individual)
b = len(individual)
return a, 1. / b
ind1.fitness.values = evaluate(ind1)
print ind1.fitness.valid
print ind1.fitness
mutant = toolbox1.clone(ind1)
ind2, = tools.mutGaussian(mutant, mu=0.0, sigma=0.2, indpb=0.2)
del mutant.fitness.values
print ind2 is mutant
print mutant is ind2
child1, child2 = [toolbox1.clone(ind) for ind in (ind1, ind2)]
tools.cxBlend(child1, child2, 0.5)
del child1.fitness.values
del child2.fitness.values
selected = tools.selBest([child1, child2], 2)
print child1 in selected
from deap import base
from deap import tools
toolbox1 = base.Toolbox()
def evaluateInd(individual):
# Do some computation
return result,
toolbox1.register("mate", tools.cxTwoPoint)
toolbox1.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
toolbox1.register("select", tools.selTournament, tournsize=3)
toolbox1.register("evaluate", evaluateInd)
def checkBounds(min, max):
def decorator(func):
def wrapper(*args, **kargs):
offspring = func(*args, **kargs)
for child in offspring:
for i in xrange(len(child)):
if child[i] > max:
child[i] = max
elif child[i] < min:
child[i] = min
return offspring
return wrapper
return decorator
toolbox.register("mate_example", tools.cxBlend, alpha=0.2)
toolbox.register("mutate_example", tools.mutGaussian, mu=0, sigma=2)
MIN = 0; MAX = 10
toolbox.decorate("mate_example", checkBounds(MIN, MAX))
toolbox.decorate("mutate_example", checkBounds(MIN, MAX))
from deap import algorithms
NGEN = 20 # number of generations
CXPB = 0.6
MUTPB = 0.05
for g in range(NGEN):
# Select and clone the next generation individuals
offspring = map(toolbox.clone, toolbox.select(pop, len(pop)))
# Apply crossover and mutation on the offspring
offspring = algorithms.varAnd(offspring, toolbox, CXPB, MUTPB)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring
pop[:] = offspring
from deap import algorithms
result = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=50)
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
import numpy
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=0,
stats=stats, verbose=True)
record = stats.compile(pop)
>>> print(record)
{'std': 4.96, 'max': 63.0, 'avg': 50.2, 'min': 39.0}
logbook = tools.Logbook()
logbook.record(gen=0, evals=30, **record)
gen, avg = logbook.select("gen", "avg")
logbook.header = "gen", "avg", "spam"
print(logbook)
gen = logbook.select("gen")
fit_mins = logbook.chapters["fitness"].select("min")
size_avgs = logbook.chapters["size"].select("avg")
import matplotlib.pyplot as plt
%matplotlib inline
fig, ax1 = plt.subplots()
line1 = ax1.plot(gen, fit_mins, "b-", label="Minimum Fitness")
ax1.set_xlabel("Generation")
ax1.set_ylabel("Fitness", color="b")
for tl in ax1.get_yticklabels():
tl.set_color("b")
ax2 = ax1.twinx()
line2 = ax2.plot(gen, size_avgs, "r-", label="Average Size")
ax2.set_ylabel("Size", color="r")
for tl in ax2.get_yticklabels():
tl.set_color("r")
lns = line1 + line2
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc="center right")
plt.show()
from math import sin
from deap import base
from deap import tools
def evalFct(individual):
Evaluation function for the individual.
x = individual[0]
return (x - 5)**2 * sin(x) * (x/3),
def feasible(individual):
Feasability function for the individual. Returns True if feasible False
otherwise.
if 3 < individual[0] < 5:
return True
return False
def distance(individual):
A distance function to the feasability region.
return (individual[0] - 5.0)**2
toolbox = base.Toolbox()
toolbox.register("evaluate", evalFct)
toolbox.decorate("evaluate", tools.DeltaPenality(feasible, 7.0, distance))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is the file which gives the methodology behind the use of xgboost. xgboost can be found at https
Step2: Make testing and training data for feature extraction.
Step3: Now the starting parameters for the are not optimised. First we optimise the decision trees main parameters. The depth and the minimum child weight.
Step4: Make a dictionary of the best parameters.
Step5: We see that a max depth of 3 and min_child_weight of 7 is the best. Now optimise the learning rate and subsample.
Step6: This is where we do the cross validation. We DONT want to overfit!
Step7: We now have a good model for our data.
Step8: Now try the test data
Step9: From this we just make a scatter plot so we can visualise the spacial separation
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
pylab.rcParams['figure.figsize'] = (10, 6)
from datetime import datetime
import Methods as models
import Predictors as predictors
import stock_tools as st
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
import numpy as np
import seaborn as sns
sns.set(font_scale = 1.5)
# Create a template with the available variables
interest = 'SPY'
start_date = datetime.strptime('2000-01-01', '%Y-%m-%d')
end_date = datetime.strptime('2010-12-31', '%Y-%m-%d')
# Get the data and correct for fluctuations
data = st.get_data(start_date,end_date,from_file=True)
corr_data = st.ohlc_adj(data)
# Create a predictors class which we will base our decisions from
pred = predictors.Predictors(corr_data)
# The data is far too noisy to make accurate predictions.
# We apply a 5 day exponential rolling filter. This should preserve
# shape and reduce noise.
pred.e_filter(5)
imp = pred.props
i = 252*2
ndays = 252*2
forward_look = 0
ind = int(np.round(ndays * 0.8))
temp = ((1 - pred.data.Close.shift(1).div(pred.data.Close)) > 0)*1
X_TRAIN = imp.ix[(i - ndays):(i - ndays + ind)]
Y_TRAIN = temp.ix[imp.ix[(i - ndays+forward_look):(i - ndays + ind+forward_look)].index]
X_TEST = imp.ix[(i - ndays + ind):i]
Y_TEST = temp.ix[imp.ix[(i - ndays + ind+forward_look):(i+forward_look)].index]
cv_params = {'max_depth': [3,5,7,9,11], 'min_child_weight': [1,3,5,7,9]}
# Our other parameters. Note the binary logistic objective as we want to determine rise or fall.
ind_params = {'learning_rate': 0.1, 'n_estimators': 1000, 'seed':0, 'subsample': 0.8, 'colsample_bytree': 0.8,
'objective': 'binary:logistic'}
# Make the optimizer
optimized_GBM = GridSearchCV(xgb.XGBClassifier(**ind_params),
cv_params,
scoring = 'accuracy', cv = 5, n_jobs = -1)
# Optimise on the training data
optimized_GBM.fit(X_TRAIN, Y_TRAIN)
# Possible parameter combinations
print(optimized_GBM.grid_scores_)
best = sorted(optimized_GBM.grid_scores_, key=lambda x: (x[1], -np.std(x[2]), -x.parameters['max_depth']))[
-1].parameters
cv_params = {'learning_rate': [0.1, 0.01, 0.005], 'subsample': [0.7,0.8,0.9]}
ind_params = {'n_estimators': 1000, 'seed': 0, 'colsample_bytree': 0.8,
'objective': 'binary:logistic', 'max_depth': best["max_depth"],
'min_child_weight': best["min_child_weight"]}
optimized_GBM = GridSearchCV(xgb.XGBClassifier(**ind_params),
cv_params,
scoring = 'accuracy', cv = 5, n_jobs = -1)
optimized_GBM.fit(X_TRAIN, Y_TRAIN)
# Add these parameters to the dictionary.
best = {**best, **sorted(optimized_GBM.grid_scores_, key=lambda x: (x[1], -np.std(x[2]), x.parameters['subsample']))[
-1].parameters}
# Create a cross validation matrix
xgdmat = xgb.DMatrix(X_TRAIN, Y_TRAIN)
# We see that a subsample of 0.9 and learning rate of 0.005 is the best
our_params = {'eta': best["learning_rate"], 'seed': 0, 'subsample': best["subsample"], 'colsample_bytree': 0.8,
'objective': 'binary:logistic', 'max_depth': best["max_depth"],
'min_child_weight': best["min_child_weight"]}
cv_xgb = xgb.cv(params = our_params, dtrain = xgdmat, num_boost_round = 4000, nfold = 10,
metrics = ['error'], # Make sure you enter metrics inside a list or you may encounter issues!
early_stopping_rounds = 100) # Look for early stopping that minimizes error
print(cv_xgb.tail(5))
our_params = {'eta': best["learning_rate"], 'seed': 0, 'subsample': best["subsample"], 'colsample_bytree': 0.8,
'objective': 'binary:logistic', 'max_depth': best["max_depth"],
'min_child_weight': best["min_child_weight"]}
final_gb = xgb.train(our_params, xgdmat, num_boost_round = 432)
# Now we check out the feature importance
# We find that RStok0, meanfractal, mom and MACD_I and dvol are important.
xgb.plot_importance(final_gb)
plt.show()
testdmat = xgb.DMatrix(X_TEST)
y_pred = final_gb.predict(testdmat) # Predict using our testdmat
print(y_pred)
predicted = y_pred
predicted[predicted > 0.5] = 1
predicted[predicted <= 0.5] = 0
X_TEST["REAL"] = Y_TEST
X_TEST["PRED"] = predicted
ret = accuracy_score(predicted, Y_TEST), 1-accuracy_score(predicted, Y_TEST)
print("Accuracy is %s" % ret[0])
m = []
m.append(models.ML(pred.props))
m[0].pred.PHH = ((1 - pred.data.Close.shift(1).div(pred.data.Close)) > 0)*1
ax = pred.props.ix[m[0].pred.PHH == 0].plot.scatter(x='meanfractal', y='RStok0', label='Decrease',color="b")
a2 = pred.props.ix[m[0].pred.PHH == 1].plot.scatter(x='meanfractal', y='RStok0', label='Increase',color="r",ax=ax)
ax.set_title("Feature importance")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To sort tweets by favorites or retweets, need to convert unicode to integers
Step2: For some reason the search did not include Trump's username - random errors like this sometimes happen when querying the twitter website.
Step3: Make word frequency dataframe
Step4: Look at most and least Trump-like tweets at varying levels of background requirement
Step5: At least 1000 background occurrences
Step6: At least 10,000 background occurrences
Step7: And now look at least Trump-like relative to Twitter background
Step8: Trump does not post about things happening automatically.
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
from twords.twords import Twords
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
# this pandas line makes the dataframe display all text in a line; useful for seeing entire tweets
pd.set_option('display.max_colwidth', -1)
twit = Twords()
# set path to folder that contains jar files for twitter search
twit.jar_folder_path = "../jar_files_and_background/"
twit.get_all_user_tweets("realdonaldtrump", tweets_per_run=500)
twit.data_path = "realdonaldtrump"
twit.get_java_tweets_from_csv_list()
twit.convert_tweet_dates_to_standard()
twit.tweets_df["retweets"] = twit.tweets_df["retweets"].map(int)
twit.tweets_df["favorites"] = twit.tweets_df["favorites"].map(int)
twit.tweets_df.sort_values("favorites", ascending=False)[:5]
twit.tweets_df.sort_values("retweets", ascending=False)[:5]
twit.background_path = '../jar_files_and_background/freq_table_72319443_total_words_twitter_corpus.csv'
twit.create_Background_dict()
twit.create_Stop_words()
twit.keep_column_of_original_tweets()
twit.lower_tweets()
twit.keep_only_unicode_tweet_text()
twit.remove_urls_from_tweets()
twit.remove_punctuation_from_tweets()
twit.drop_non_ascii_characters_from_tweets()
twit.drop_duplicate_tweets()
twit.convert_tweet_dates_to_standard()
twit.sort_tweets_by_date()
twit.create_word_bag()
twit.make_nltk_object_from_word_bag()
twit.create_word_freq_df(10000)
twit.word_freq_df.sort_values("log relative frequency", ascending = False, inplace = True)
twit.word_freq_df.head(20)
num_words_to_plot = 32
background_cutoff = 100
twit.word_freq_df[twit.word_freq_df['background occurrences']>background_cutoff].sort_values("log relative frequency", ascending=True).set_index("word")["log relative frequency"][-num_words_to_plot:].plot.barh(figsize=(20,
num_words_to_plot/2.), fontsize=30, color="c");
plt.title("log relative frequency", fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
num_words_to_plot = 32
background_cutoff = 1000
twit.word_freq_df[twit.word_freq_df['background occurrences']>background_cutoff].sort_values("log relative frequency", ascending=True).set_index("word")["log relative frequency"][-num_words_to_plot:].plot.barh(figsize=(20,
num_words_to_plot/2.), fontsize=30, color="c");
plt.title("log relative frequency", fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
num_words_to_plot = 32
background_cutoff = 10000
twit.word_freq_df[twit.word_freq_df['background occurrences']>background_cutoff].sort_values("log relative frequency", ascending=True).set_index("word")["log relative frequency"][-num_words_to_plot:].plot.barh(figsize=(20,
num_words_to_plot/2.), fontsize=30, color="c");
plt.title("log relative frequency", fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
num_words_to_plot = 32
background_cutoff = 10000
twit.word_freq_df[twit.word_freq_df['background occurrences']>background_cutoff].sort_values("log relative frequency", ascending=False).set_index("word")["log relative frequency"][-num_words_to_plot:].plot.barh(figsize=(20,
num_words_to_plot/2.), fontsize=30, color="c");
plt.title("log relative frequency", fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
twit.tweets_containing("fuck")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interpretación de la FT de una imagen
Step2: Note que solo aparecen aproximadamente dos deltas de Dirac en el espacio frecuencial. De forma análoga al caso unidimensional esos dos puntos corresponden a la frecuencia del seno en el espacio espacial. Note también que los puntos salen en la dirección horizontal, indicando que la dirección del seno bidimensional es horizontal.
Step3: Observe que ahora aparecen unas deltas en la dirección vertical que dan cuenta del seno en la dirección vertical.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib
import pylab as plt
import scipy.misc as pim
from scipy import stats
% matplotlib inline
tam = 256 # tamaño matriz
dx = 0.01 # resolución (m/pixel)
x = np.arange(-dx*tam/2,dx*tam/2,dx) # coordenadas espaciales
X , Y = np.meshgrid(x,x) # espacio bidimensional
A1 = 1. # amplitud en unidades arbitrarias
f1 = 1. # frecuencia espacial (1/m)
g1 = A1*np.sin(2*np.pi*f1*X) # Imagen en el espacio "espacial"
ftg1 = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(g1)))*dx**2 # Transformada de Fourier, espacio frecuencial
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.imshow(abs(g1), cmap='gray')
plt.title('Espacio espacial')
plt.subplot(1,2,2)
plt.imshow(abs(ftg1), cmap='gray')
plt.title('Amplitud espacio frecuencial')
tam = 256 # tamaño matriz
dx = 0.01 # resolución (m/pixel)
x = np.arange(-dx*tam/2,dx*tam/2,dx) # coordenadas espaciales
X , Y = np.meshgrid(x,x) # espacio bidimensional
A1 = 1. # amplitud en unidades arbitrarias
f1 = 1. # frecuencia espacial (1/m)
gx = A1*np.sin(2*np.pi*f1*X) # Seno en la dirección horizontal
gy = A1*np.sin(2*np.pi*f1*Y) # Seno en la dirección vertical
g = gx + gy # Superposición
ftg = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(g)))*dx**2 # Transformada de Fourier, espacio frecuencial
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.imshow(g, cmap='gray')
plt.title('Espacio espacial')
plt.subplot(1,2,2)
plt.imshow(abs(ftg), cmap='gray')
plt.title('Amplitud espacio frecuencial')
tam = 256 # tamaño matriz
dx = 0.01 # resolución (m/pixel)
x = np.arange(-dx*tam/2,dx*tam/2,dx) # coordenadas espaciales
X , Y = np.meshgrid(x,x) # espacio bidimensional
A1 = 1. # amplitud en unidades arbitrarias
f1 = 1. # frecuencia espacial (1/m)
gx = A1*np.sin(2*np.pi*2*f1*X) # Seno en la dirección horizontal
gy = A1*np.sin(2*np.pi*4*f1*Y) # Seno en la dirección vertical
gd = 2*A1*np.cos(2*np.pi*f1*(X + Y)) # CCoceno en la dirección diagonal
g = gx + gy + gd # Superposición
ftg = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(g)))*dx**2 # Transformada de Fourier, espacio frecuencial
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.imshow(g, cmap='gray')
plt.title('Espacio espacial')
plt.subplot(1,2,2)
plt.imshow(abs(ftg), cmap='gray')
plt.title('Amplitud espacio frecuencial')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the default solution on dev
Step2: Evaluate the default output
|
<ASSISTANT_TASK:>
Python Code:
from default import *
import os, sys
model = Seq2Seq(build=False)
model.load(os.path.join('data', 'seq2seq_E049.pt'))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
# loading test dataset
test_iter = loadTestData(os.path.join('data', 'input', 'dev.txt'), model.fields['src'],
device=device, linesToLoad=sys.maxsize)
results = translate(model, test_iter) # Warning: will take >5mins depending on your machine
print("\n".join(results))
from bleu_check import bleu
ref_t = []
with open(os.path.join('data','reference','dev.out')) as r:
ref_t = r.read().strip().splitlines()
print(bleu(ref_t, results))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Feature Sets
Step4: Task 1
Step8: Features that have strong positive or negative correlations with the target will add information to our model. We can use the correlation matrix to find such strongly correlated features.
Step9: Spend 5 minutes searching for a good set of features and training parameters. Then check the solution to see what we chose. Don't forget that different features may require different learning parameters.
Step10: Solution
Step11: Task 2
Step12: Try creating some synthetic features that do a better job with latitude.
Step13: Solution
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
def preprocess_features(california_housing_dataframe):
Prepares input features from California housing data set.
Args:
california_housing_dataframe: A Pandas DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the features to be used for the model, including
synthetic features.
selected_features = california_housing_dataframe[
["latitude",
"longitude",
"housing_median_age",
"total_rooms",
"total_bedrooms",
"population",
"households",
"median_income"]]
processed_features = selected_features.copy()
# Create a synthetic feature.
processed_features["rooms_per_person"] = (
california_housing_dataframe["total_rooms"] /
california_housing_dataframe["population"])
return processed_features
def preprocess_targets(california_housing_dataframe):
Prepares target features (i.e., labels) from California housing data set.
Args:
california_housing_dataframe: A Pandas DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the target feature.
output_targets = pd.DataFrame()
# Scale the target to be in units of thousands of dollars.
output_targets["median_house_value"] = (
california_housing_dataframe["median_house_value"] / 1000.0)
return output_targets
# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
correlation_dataframe = training_examples.copy()
correlation_dataframe["target"] = training_targets["median_house_value"]
correlation_dataframe.corr()
def construct_feature_columns(input_features):
Construct the TensorFlow Feature Columns.
Args:
input_features: The names of the numerical input features to use.
Returns:
A set of feature columns
return set([tf.feature_column.numeric_column(my_feature)
for my_feature in input_features])
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
Trains a linear regression model.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
# Convert pandas data into a dict of np arrays.
features = {key:np.array(value) for key,value in dict(features).items()}
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
# Shuffle the data, if specified.
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
def train_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
Trains a linear regression model.
In addition to training, this function also prints training progress information,
as well as a plot of the training and validation loss over time.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for training.
training_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for training.
validation_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for validation.
validation_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for validation.
Returns:
A `LinearRegressor` object trained on the training data.
periods = 10
steps_per_period = steps / periods
# Create a linear regressor object.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=construct_feature_columns(training_examples),
optimizer=my_optimizer
)
# Create input functions.
training_input_fn = lambda: my_input_fn(training_examples,
training_targets["median_house_value"],
batch_size=batch_size)
predict_training_input_fn = lambda: my_input_fn(training_examples,
training_targets["median_house_value"],
num_epochs=1,
shuffle=False)
predict_validation_input_fn = lambda: my_input_fn(validation_examples,
validation_targets["median_house_value"],
num_epochs=1,
shuffle=False)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("RMSE (on training data):")
training_rmse = []
validation_rmse = []
for period in range (0, periods):
# Train the model, starting from the prior state.
linear_regressor.train(
input_fn=training_input_fn,
steps=steps_per_period,
)
# Take a break and compute predictions.
training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
# Compute training and validation loss.
training_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(training_predictions, training_targets))
validation_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(validation_predictions, validation_targets))
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
# Add the loss metrics from this period to our list.
training_rmse.append(training_root_mean_squared_error)
validation_rmse.append(validation_root_mean_squared_error)
print("Model training finished.")
# Output a graph of loss metrics over periods.
plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.title("Root Mean Squared Error vs. Periods")
plt.tight_layout()
plt.plot(training_rmse, label="training")
plt.plot(validation_rmse, label="validation")
plt.legend()
return linear_regressor
#
# Your code here: add your features of choice as a list of quoted strings.
#
minimal_features = [
]
assert minimal_features, "You must select at least one feature!"
minimal_training_examples = training_examples[minimal_features]
minimal_validation_examples = validation_examples[minimal_features]
#
# Don't forget to adjust these parameters.
#
train_model(
learning_rate=0.001,
steps=500,
batch_size=5,
training_examples=minimal_training_examples,
training_targets=training_targets,
validation_examples=minimal_validation_examples,
validation_targets=validation_targets)
minimal_features = [
"median_income",
"latitude",
]
minimal_training_examples = training_examples[minimal_features]
minimal_validation_examples = validation_examples[minimal_features]
_ = train_model(
learning_rate=0.01,
steps=500,
batch_size=5,
training_examples=minimal_training_examples,
training_targets=training_targets,
validation_examples=minimal_validation_examples,
validation_targets=validation_targets)
plt.scatter(training_examples["latitude"], training_targets["median_house_value"])
#
# YOUR CODE HERE: Train on a new data set that includes synthetic features based on latitude.
#
def select_and_transform_features(source_df, latitude_ranges=None):
    """Build a feature frame with `median_income` plus one-hot latitude buckets.

    Each bucket column `latitude_A_to_B` holds 1.0 for rows whose latitude
    falls in [A, B) and 0.0 otherwise.

    Args:
        source_df: DataFrame with at least `median_income` and `latitude`
            columns.
        latitude_ranges: optional iterable of (low, high) bucket bounds.
            Defaults to the original one-degree buckets [32, 33) .. [43, 44),
            so existing callers are unaffected.

    Returns:
        A new DataFrame; `source_df` is not modified.
    """
    if latitude_ranges is None:
        # Original hard-coded one-degree buckets covering latitudes 32-44.
        latitude_ranges = zip(range(32, 44), range(33, 45))
    selected_examples = pd.DataFrame()
    selected_examples["median_income"] = source_df["median_income"]
    for low, high in latitude_ranges:
        # Bind bounds as defaults so the lambda is self-contained.
        selected_examples["latitude_%d_to_%d" % (low, high)] = source_df["latitude"].apply(
            lambda l, lo=low, hi=high: 1.0 if lo <= l < hi else 0.0)
    return selected_examples
selected_training_examples = select_and_transform_features(training_examples)
selected_validation_examples = select_and_transform_features(validation_examples)
_ = train_model(
learning_rate=0.01,
steps=500,
batch_size=5,
training_examples=selected_training_examples,
training_targets=training_targets,
validation_examples=selected_validation_examples,
validation_targets=validation_targets)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.4,
random_state=101,
stratify=y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The class <code>plot_error_surfaces</code> is just to help you visualize the data space and the parameter space during training and has nothing to do with PyTorch.
Step2: Set the random seed
Step3: <!--Empty Space for separating topics-->
Step4: Make <code>Data</code> object
Step5: <!--Empty Space for separating topics-->
Step6: Create a logistic regression object and print the parameters
Step7: Create a <code>plot_error_surfaces</code> object to visualize the data space and the parameter space during training
Step8: Create <code>DataLoader</code> object, cost or criterion function and <code>optimizer</code>
Step9: <!--Empty Space for separating topics-->
Step10: Get the actual class of each sample and calculate the accuracy on the test data.
|
<ASSISTANT_TASK:>
Python Code:
# Import the libraries we need for this lab
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
# Create class for plotting and the function for plotting
class plot_error_surfaces(object):
    """Visualize the MSE loss surface over (w, b) for a sigmoid model
    and record the SGD trajectory for later plotting.

    X and Y are torch tensors (converted to numpy internally). The surface
    Z[i, j] is the mean squared error of sigmoid(w*x + b) on the data at
    grid point (w, b).

    NOTE(review): Z is hard-coded to shape (30, 30) while the grid size
    comes from n_samples -- passing n_samples != 30 would break the fill
    loop. Confirm before reusing with other sizes.
    """
    # Constructor
    def __init__(self, w_range, b_range, X, Y, n_samples = 30, go = True):
        W = np.linspace(-w_range, w_range, n_samples)
        B = np.linspace(-b_range, b_range, n_samples)
        w, b = np.meshgrid(W, B)
        Z = np.zeros((30, 30))
        count1 = 0
        self.y = Y.numpy()
        self.x = X.numpy()
        # Fill the loss grid: MSE of the sigmoid prediction at each (w, b)
        for w1, b1 in zip(w, b):
            count2 = 0
            for w2, b2 in zip(w1, b1):
                Z[count1, count2] = np.mean((self.y - (1 / (1 + np.exp(-1 * (w2 * self.x + b2))))) ** 2)
                count2 += 1
            count1 += 1
        self.Z = Z
        self.w = w
        self.b = b
        # Training history, appended to by set_para_loss each step
        self.W = []
        self.B = []
        self.LOSS = []
        self.n = 0
        # Optionally draw the 3D surface and its contour immediately
        if go == True:
            plt.figure()
            plt.figure(figsize = (7.5, 5))
            plt.axes(projection = '3d').plot_surface(self.w, self.b, self.Z, rstride = 1, cstride = 1, cmap = 'viridis', edgecolor = 'none')
            plt.title('Loss Surface')
            plt.xlabel('w')
            plt.ylabel('b')
            plt.show()
            plt.figure()
            plt.title('Loss Surface Contour')
            plt.xlabel('w')
            plt.ylabel('b')
            plt.contour(self.w, self.b, self.Z)
            plt.show()
    # Record the model's current scalar weight, bias, and loss
    def set_para_loss(self, model, loss):
        """Append the model's first two parameters (weight, bias) and *loss*
        to the trajectory history; assumes a single-weight model."""
        self.n = self.n + 1
        self.W.append(list(model.parameters())[0].item())
        self.B.append(list(model.parameters())[1].item())
        self.LOSS.append(loss)
    # Final 3D wireframe + contour with the full training trajectory
    def final_plot(self):
        """Plot the loss surface with every recorded (w, b, loss) point."""
        ax = plt.axes(projection = '3d')
        ax.plot_wireframe(self.w, self.b, self.Z)
        ax.scatter(self.W, self.B, self.LOSS, c = 'r', marker = 'x', s = 200, alpha = 1)
        plt.figure()
        plt.contour(self.w, self.b, self.Z)
        plt.scatter(self.W, self.B, c = 'r', marker = 'x')
        plt.xlabel('w')
        plt.ylabel('b')
        plt.show()
    # Side-by-side snapshot: data space (left) and contour trajectory (right)
    def plot_ps(self):
        """Show the current fit over the data next to the contour trajectory."""
        plt.subplot(121)
        plt.ylim  # NOTE(review): bare attribute access, a no-op -- likely meant plt.ylim(...)
        plt.plot(self.x, self.y, 'ro', label = "training points")
        plt.plot(self.x, self.W[-1] * self.x + self.B[-1], label = "estimated line")
        plt.plot(self.x, 1 / (1 + np.exp(-1 * (self.W[-1] * self.x + self.B[-1]))), label = 'sigmoid')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.ylim((-0.1, 2))
        plt.title('Data Space Iteration: ' + str(self.n))
        plt.show()
        plt.subplot(122)
        plt.contour(self.w, self.b, self.Z)
        plt.scatter(self.W, self.B, c = 'r', marker = 'x')
        plt.title('Loss Surface Contour Iteration' + str(self.n))
        plt.xlabel('w')
        plt.ylabel('b')
# Plot the diagram
def PlotStuff(X, Y, model, epoch, leg = True):
    """Overlay the model's current predictions on the targets.

    Plots model(X) labelled with the epoch number, then the ground-truth
    Y in red. A legend is drawn only when *leg* is exactly True.
    """
    inputs = X.numpy()
    predictions = model(X).detach().numpy()
    plt.plot(inputs, predictions, label = 'epoch ' + str(epoch))
    plt.plot(inputs, Y.numpy(), 'r')
    if leg == True:
        plt.legend()
# Set random seed
torch.manual_seed(0)
# Create the data class
class Data(Dataset):
    """Toy 1-D classification dataset.

    Features span [-1, 1) in steps of 0.1 (20 points, shape (20, 1));
    the label is 1 wherever x > 0.2 and 0 elsewhere.
    """

    def __init__(self):
        features = torch.arange(-1, 1, 0.1).view(-1, 1)
        labels = torch.zeros(features.shape[0], 1)
        # Positive class: strictly right of the 0.2 decision boundary
        labels[features[:, 0] > 0.2] = 1
        self.x = features
        self.y = labels
        self.len = features.shape[0]

    def __getitem__(self, index):
        # One (feature, label) pair
        return (self.x[index], self.y[index])

    def __len__(self):
        # Number of samples
        return self.len
# Create Data object
data_set = Data()
# Create logistic_regression class
class logistic_regression(nn.Module):
    """Single-unit logistic regression: a linear map followed by a sigmoid."""

    def __init__(self, n_inputs):
        super(logistic_regression, self).__init__()
        # One linear unit mapping n_inputs features to a single logit
        self.linear = nn.Linear(n_inputs, 1)

    def forward(self, x):
        # Squash the logit into (0, 1)
        return torch.sigmoid(self.linear(x))
# Create model object and print parameter
model = logistic_regression(1)
print("The parameters: ", model.state_dict())
# Create plot_error_surfaces object
get_surface = plot_error_surfaces(15, 13, data_set[:][0], data_set[:][1], 30)
# Create DataLoader, Cost Function, Optimizer
trainloader = DataLoader(dataset = data_set, batch_size = 3)
criterion_rms = nn.MSELoss()
learning_rate = 2
optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)
# Train the model
def train_model(epochs):
    """Run mini-batch SGD for *epochs* passes over the training data.

    Relies on module-level globals defined in earlier cells:
    `trainloader` (DataLoader), `model`, `criterion_rms` (MSE loss),
    `optimizer` (SGD), and `get_surface` (plot_error_surfaces tracker).
    Every 20th epoch the data-space/contour snapshot is redrawn.
    """
    for epoch in range(epochs):
        for x, y in trainloader:
            yhat = model(x)
            loss = criterion_rms(yhat, y)
            optimizer.zero_grad()  # clear gradients left over from the previous step
            loss.backward()
            optimizer.step()
            # Record (w, b, loss) for the trajectory plots
            get_surface.set_para_loss(model, loss.tolist())
        if epoch % 20 == 0:
            get_surface.plot_ps()
train_model(100)
# Make the Prediction
yhat = model(data_set.x)
label = yhat > 0.5
print("The accuracy: ", torch.mean((label == data_set.y.type(torch.ByteTensor)).type(torch.float)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ... some imports to create some random data ...
Step2: Create a data file object
Step3: h5d is now an empty hdf5 file, holding only some qkit structure
Step4: Now use qviewkit to open and view the file
Step5: Now we get to the important stuff
Step6: A value_vector is a 1-dim vector of measured values
Step7: A value_matrix is a 2-dim tuple with two coordinates and measurement values
Step8: A value_box is a 3-dim tuple with three coordinates and measurement values
Step9: For free text, like settings or comments, there is a special dataset which can be appended to add data
Step10: If one want to plot a arbitrary dataset vs another arbitrary dataset use a view
Step11: Finally add some data to the file
Step12: Close the file at the end
|
<ASSISTANT_TASK:>
Python Code:
import qkit
from qkit.storage import store
## for random data
from numpy.random import rand
from numpy import linspace,arange
import time
## number of points
nop = 101
h5d = store.Data(name='NewFancyData',mode = "a")
print(h5d.get_filepath())
h5d.add_comment("New data has been created ....")
# add_coordinate() <- for measurement boundaries/steps
# options: name (mandatory)
# : unit = "" (optional, default is "a.u.")
# : comment = "" (optional, default is "")
# : folder='data' | 'analysis' (optional, default is "data")
f_co = h5d.add_coordinate('frequency', unit = "Hz", comment = "VNA frequency scan")
I_co = h5d.add_coordinate('current', unit = "A", comment = "magnetic field current")
P_co = h5d.add_coordinate('power', unit = "dBm", comment = "microwave power")
# add_value_vector() <- for measurement data
# options: name (mandatory)
# : x = X (optional) coordinate vector in x direction, default: None
# : unit = "" (optional, default is "a.u.")
# : comment = "" (optional, default is "")
# : folder='data' | 'analysis' (optional, default is "data")
T_vec = h5d.add_value_vector('temperature', x = None,
unit = "K", comment = "save temperature values")
Tc_vec = h5d.add_value_vector('critical_temperature', x = I_co,
unit = "K", folder='analysis' ,comment = "save temperature values")
# add_value_matrix() <- for measurement data
# convention: the last coordiante should be the one with the fastest changes:
# e.g. for a VNA scan x= magnetic field y= transmission frequency
#
# options: name (mandatory)
# : x = X (optional) coordinate vector in x direction, default: None
# : y = Y (mandatory) coordinate vector in y direction / fastest changes
# : unit = "" (optional, default is "a.u.")
# : comment = "" (optional, default is "")
# : folder='data' | 'analysis' (optional, default is "data")
amp_mx = h5d.add_value_matrix('amplitude', x = I_co , y = f_co,
unit = "V", comment = "magic data")
pha_mx = h5d.add_value_matrix('phase', x = I_co , y = f_co,
unit = "rad", comment = "more magic data!")
# add_value_box() <- for measurement data
# options: name (mandatory)
# : x = X (optional) coordinate vector in x direction, default: None
# : y = Y (optional) coordinate vector in y direction
# : z = Z (mandatory) coordinate vector in y direction / fastest changes
# : unit = "" (optional, default is "a.u.")
# : comment = "" (optional, default is "")
# : folder='data' | 'analysis' (optional, default is "data")
amp_bx = h5d.add_value_box('amplitude', x = I_co , y = f_co, z= P_co,
unit = "V", comment = "magic data")
pha_bx = h5d.add_value_box('phase', x = I_co , y = f_co, z= P_co,
unit = "rad", comment = "more magic data!")
#string array
#add_textlist()
#options: name (mandatory)
# : comment = "" (optional)
# : folder="data" (optional)
# use the append method to add the text
settings = h5d.add_textlist("settings",comment = "my settings")
settings.append(u"vnapower = 10dBm")
settings.append(u"fridge attenuation=50db\n data jumps like Van Halen.")
### Add a view on Data:
TvsTc_view = h5d.add_view("f_vs_I", x = f_co, y = I_co)
TvsTc_view.add(x=T_vec,y=Tc_vec)
Laspect_view = h5d.add_view("locked_aspect", x = f_co, y = f_co,
view_params={'aspect':1.0, 'bgcolor':(100,200,100)})
# now we add the coordinate data to the file
fs = linspace(1e9,5e9,nop)
Is = linspace(0e-3,10e-3,nop)
f_co.add(fs)
I_co.add(Is)
for i in arange(nop):
#time.sleep(10)
amp = rand(nop)
pha = rand(nop)
amp_mx.append(amp)
pha_mx.append(pha)
T_vec.append(float(rand(1)))
Tc_vec.append(float(rand(1)))
h5d.close_file()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's look at the columns in the database.
Step2: So there are the following things in the dataset.
Step3: A quarter of the city's population approximately lives in slums.
Step4: 3. Which circles have the maximum number of slums?
Step5: Circle 4 - which is the old city quarter of Hyderabad has the maximum number of slums in terms of numbers.
Step6: Because the data is faulty, there are slums whose ward numbers is empty, ie 0.The remainder of the list are the ward numbers with maximum slums and on the right are the number of slums in them. That makes ward 108 have the maximum number of slums. On introspecting on a map(in CartoDB) - one is curious to know why is it that this ward has a highly fragmented number of slums, often not more than a couple of houses.
Step7: 6. What percentage of households in the slum are BPL(Below Poverty Level)?
Step8: That is high enough for us to consider the slum as a decent proxy for urban poverty in Hyderabad.
Step9: On closer introspection, it is found that the other datasets are per household and not population wise like tha literacy percentage. So we do the addition of the following columns into the dataframe. So we repeat the above calculation for percentage of households below poverty line.
Step10: Now we repeat this for the Caste Parameters.
Step11: And for the type of structures..
Step12: .. And for the kind of tenure
Step13: and finally for the number of years the residents have stayed..
Step14: Now, let us see how the slums dataframe looks like
Step15: We now have the data with all the calculations necessary made for analysis and now we begin.
Step16: 8. Which slums have the highest percentage of households that belong to the SC Caste?
Step17: 9. Which slums have the highest percentage of households that belong to the ST Caste?
Step18: 10. Which slums have the highest percentage of households that belong to the OBC Caste?
Step19: Now, we see what percentage of each of these castes live in slums.
Step20: STRUCTURES OF HOUSES
Step21: That's roughly about half of them. What about the remaining half?
Step22: That still leaves out a little bit more... and the remaining structures are made out of..
Step23: Kaccha houses.
Step24: 15. What percentage of Households have access to Possession Certificates?
Step25: 16. What percentage of Households are on Private Land?
Step26: 17. What percentage of Households are on Public Land?
Step27: 18. What percentage of Households are Renters?
Step28: 19. What percentage of Households don't fit in any of the other categories?
Step29: WHEN HAVE PEOPLE MOVED IN
Step30: 21. What percentage of Households have moved in the last one to three years?
Step31: 22. What percentage of Households have moved in the last three to five years?
Step32: 23. What percentage of Households have moved more than five years ago?
Step33: INCOME, EXPENDITURE, DEBT
Step34: 25. What is the average monthly expenditure of a slum household? (in rupees, 70 rupees make a dollar)
Step35: 26. What is the average debt per slum?(in rupees, 70 rupees make a dollar)
|
<ASSISTANT_TASK:>
Python Code:
#Starting out the basics.
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
slums= pd.read_csv("hyderabad_slum_master.csv")
slums.head() #The dataset is a spatialised one, hence the_geom column.
slums.columns
totalpopulation=slums['population'].sum()
print("The total number of people who live in slums in Hyderabad are",int(totalpopulation))
print("")
populationofhyderabad=6731790 #According to the 2011 census.
percentageofpopulation=(totalpopulation/populationofhyderabad)*100
print("The percentage of people who live in slums is",(percentageofpopulation))
print("The total number of slum households is",slums['households'].sum())
print("")
print("The average number of households in a slum is",slums['households'].mean())
print("")
print("The average family size is", (slums['population'].sum()/slums['households'].sum()))
circle_count=slums['circle_number'].value_counts()
circle_count
plt.style.use('ggplot')
circle_count.plot(kind='bar',x='Circle Number',y='Number of Slums',legend=False, figsize=(10,10))
slums['ward_number'].value_counts().head(20)
print("The total percentage of literacy in slums is",(slums['literacy_literates'].sum())/(slums['population'].sum())*100)
(slums['number_of_bpl_households'].sum())/(slums['households'].sum())*100
slums['percentageofliterates']=slums['literacy_literates']/slums['population']*100
slums['percentageofliterates'].mean()
slums['percentageofbplhouseholds']=slums['number_of_bpl_households']/slums['households']*100
slums['percentageofgeneral'] = slums['caste_general']/slums['households']*100
slums['percentageofminority']= slums['minority']/slums['households']*100
slums['percentageofobc'] = slums['caste_obc']/slums['households']*100
slums['percentageofsc'] = slums['caste_sc']/slums['households']*100
slums['percentageofst'] = slums['caste_st']/slums['households']*100
slums['percentageofpuccastructures'] = slums['structure_pucca']/slums['households']*100
slums['percentageofsemipuccastructures'] = slums['structure_semipucca']/slums['households']*100
slums['percentageofkucchastructures'] = slums['structure_kuccha']/slums['households']*100
slums['percentageoftenure_patta'] = slums['tenure_patta']/slums['households']*100
slums['percentageoftenure_pc'] = slums['tenure_possession_certificate']/slums['households']*100
slums['percentageoftenure_private'] = slums['tenure_private_land']/slums['households']*100
slums['percentageoftenure_public'] = slums['tenure_public']/slums['households']*100
slums['percentageoftenure_renters'] = slums['tenure_rented']/slums['households']*100
slums['percentageoftenure_other'] = slums['tenure_others']/slums['households']*100
slums['percentageoftenure_0to1'] = slums['zerotoone_years_of_stay']/slums['households']*100
slums['percentageoftenure_1to3'] = slums['onetothree_years_of_stay']/slums['households']*100
slums['percentageoftenure_3to5'] = slums['threetofive_years_of_stay']/slums['households']*100
slums['percentageoftenure_morethan5'] = slums['morethanfive_years_of_stay']/slums['households']*100
slums.head()
slums.columns
slums.sort_values(by='percentageofgeneral').head(5)
slums.sort_values(by='percentageofsc').head(5)
slums.sort_values(by='percentageofst').head(5)
slums.sort_values(by='percentageofobc').head(5)
a=slums['percentageofgeneral'].mean()
b=slums['percentageofsc'].mean()
c=slums['percentageofst'].mean()
d=slums['percentageofobc'].mean()
print(a,b,c,d)
plt.style.use('ggplot')
series=pd.Series([a,b,c,d], index=['General', 'SC', 'ST','OBC'], name='Caste Percentages')
series.plot.pie(figsize=(6, 6))
a=slums['percentageofpuccastructures'].mean()
b=slums['percentageofsemipuccastructures'].mean()
c=slums['percentageofkucchastructures'].mean()
plt.style.use('ggplot')
series=pd.Series([a,b,c], index=['Pucca Houses', 'Semipucca Houses', 'Kuccha Houses'], name='Household Structure Type')
series.plot.pie(figsize=(6, 6))
a=slums['percentageoftenure_patta'].mean()
b=slums['percentageoftenure_pc'].mean()
c=slums['percentageoftenure_private'].mean()
d=slums['percentageoftenure_public'].mean()
e=slums['percentageoftenure_renters'].mean()
f=slums['percentageoftenure_other'].mean()
plt.style.use('ggplot')
series=pd.Series([a,b,c,d,e,f], index=['Pattas', 'PC', 'Private Land','Public Land','Renters','Others'], name='Tenuree Type')
series.plot.pie(figsize=(6, 6))
a=slums['percentageoftenure_0to1'].mean()
b=slums['percentageoftenure_1to3'].mean()
c=slums['percentageoftenure_3to5'].mean()
d=slums['percentageoftenure_morethan5'].mean()
plt.style.use('ggplot')
series=pd.Series([a,b,c,d], index=['Zero to One Years', 'One to Three Years', 'Three to Five Years','More than Five Years'])
series.plot.pie(figsize=(6, 6))
slums['avg_monthly_income'].mean()
slums['avg_monthly_expenditure'].mean()
slums['debts_outstanding'].mean()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: No the syntax error in the first line leads to immediate termination of the program by raising a SyntaxError Excpetion.
Step2: apparently not the line containing the syntax error is not executed but needs to be parsed anyway, which is not possible if the line contains a syntax error.
|
<ASSISTANT_TASK:>
Python Code:
definitely broken syntax :)
print "after broken syntax" # Will this be executed?
def i_contain_broken_syntax():
definitely broken syntax :)
print "after broken syntax" # Will this be executed?
def f():
print("This is a little demonstration")
print("that the Jupyter Notebook")
print("has a decent editor")
print("Because I am pretty sure that only the error")
print("is not enough if you are new to this")
print("<--- What's that arrow? A tab character? Thank Jupyter!")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
labels = [0, 6, 5, 4, 2]
def g(labels):
    """One-hot encode integer class labels into depth-10 integer vectors.

    Returns a tensor of shape (len(labels), 10) with 1 at each label's
    index and 0 elsewhere; axis=-1 places the class dimension last.
    """
    return tf.one_hot(indices=labels, depth=10, on_value=1, off_value=0, axis=-1)
result = g(labels.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The pure drift wave
Step2: The difference between the two solutions is just the sign of the square-root
Step3: This is cumbersome to work with.
Step4: The difference between the two solutions is just the sign of the square-root.
Step5: This also gives quite the mess...
|
<ASSISTANT_TASK:>
Python Code:
from sympy import init_printing
from sympy import Eq, I
from sympy import re, im
from sympy import symbols
from sympy.solvers import solve
from IPython.display import display
from sympy import latex
om = symbols('omega')
omI = symbols('omega_i', real=True)
omStar = symbols('omega_S', real=True)
sigmaPar = symbols('sigma', positive=True)
b = symbols('b', real=True)
init_printing()
LHS = om*(om-omI)+I*sigmaPar*(om-omStar + b*(om-omI))
RHS = 0
eq = Eq(LHS, RHS)
display(eq)
sol1, sol2 = solve(eq, om)
display(sol1)
display(sol2)
sol1Re = re(sol1)
sol1Im = im(sol1)
display(sol1Re)
display(sol1Im)
LHS = om**2 + I*sigmaPar*(om*(1+b)-omStar)
RHS = 0
eq = Eq(LHS, RHS)
display(eq)
sol1, sol2 = solve(eq, om)
display(sol1)
display(sol2)
sol1Re = re(sol1.expand())
sol1Im = im(sol1.expand())
real = Eq(symbols("I"),sol1Im.simplify())
imag = Eq(symbols("R"),sol1Re.simplify())
display(real)
display(imag)
print(latex(real))
print(latex(imag))
sol2Re = re(sol2.expand())
sol2Im = im(sol2.expand())
display(Eq(symbols("I"),sol2Im.simplify()))
display(Eq(symbols("R"),sol2Re.simplify()))
# NOTE: Do not confuse om_I with om_i
om_I = symbols('omega_I', real=True)
om_R = symbols('omega_R', real=True)
LHSSplit = LHS.subs(om, om_R + I*om_I)
display(Eq(LHS, re(LHSSplit)+I*im(LHSSplit)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Examples
Step4: Anchors and References
Step7: Full example
Step9: Inserting Keys and Comments
|
<ASSISTANT_TASK:>
Python Code:
import ruamel.yaml
ruamel.yaml
ruamel
dir(ruamel)
inp = \
# example
name:
# details
family: Goda # Very uncommon
given: Satish # One of the siblings (Comman name)
print(inp)
help(ruamel.yaml.load)
code = ruamel.yaml.load(inp, Loader=ruamel.yaml.RoundTripLoader)
code
code['name']['given']
code['name']['family']
import ruamel.yaml
inp = \
- &CENTER {x: 1, y: 2}
- &LEFT {x: 0, y: 2}
- &BIG {r: 10}
- &SMALL {r: 1}
code = ruamel.yaml.load(inp, Loader=ruamel.yaml.RoundTripLoader)
code
type(code)
dir(code)
code.anchor
code.anchor.value
code.keys()
for item in code:
print(item)
for item in code:
print(item.anchor.value, item.keys())
import ruamel.yaml
inp = \
- &CENTER {x: 1, y: 2}
- &LEFT {x: 0, y: 2}
- &BIG {r: 10}
- &SMALL {r: 1}
# All the following maps are equal:
# Explicit keys
- x: 1
y: 2
r: 10
label: center/big
# Merge one map
- <<: *CENTER
r: 10
label: center/big
# Merge multiple maps
- <<: [*CENTER, *BIG]
label: center/big
# Override
- <<: [*BIG, *LEFT, *SMALL]
x: 1
label: center/big
data = ruamel.yaml.load(inp, Loader=ruamel.yaml.RoundTripLoader)
data
# Modifying the merge order of the aliases
inp = \
- &CENTER {x: 1, y: 2}
- &LEFT {x: 0, y: 2}
- &BIG {r: 10}
- &SMALL {r: 1}
# All the following maps are equal:
# Explicit keys
- x: 1
y: 2
r: 10
label: center/big
# Merge one map
- <<: *CENTER
r: 10
label: center/big
# Merge multiple maps
- <<: [*CENTER, *BIG]
label: center/big
# Override
- <<: [*SMALL, *BIG, *LEFT]
x: 1
label: center/small
data = ruamel.yaml.load(inp, Loader=ruamel.yaml.RoundTripLoader)
data
assert data[7]['y'] == 2
yaml_str = \
first_name: Art
occupation: Architect # This is an occupation comment
about: Art Vandelay is a fictional character that George invents...
data = ruamel.yaml.round_trip_load(yaml_str)
data
data.insert(1, 'last name', 'Vandelay', comment='new key')
print(ruamel.yaml.round_trip_dump(data))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({'Date':['2019-01-01','2019-02-08','2019-02-08', '2019-03-08']})
df['Date'] = pd.to_datetime(df['Date'])
df['Date'] = df['Date'].dt.strftime('%b-%Y')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll start with the simplest approach
Step2: ...and a general way to tokenize a bunch of strings
Step3: Let's time it
Step4: ...and the same thing with 2 workers
Step5: How about if we put half the work in each worker?
Step6: So there's a lot of overhead in using parallel processing in Python.
Step7: That's quite fast! (Although still slower than single process.)
Step8: That's a bit faster still.
Step9: So creating the tokens, isn't taking the time, but returning them over the process boundary is.
Step10: Is numpy any faster?
Step11: Spacy
Step12: Let's see how long it takes to create a tokenizer in Spacy
Step13: Time tokenize in Spacy using a loop
Step14: ...and the same thing in parallel
Step15: ...and with more workers
Step16: ...and with creating the tokenizer in the child process
Step17: Let's try pipe
|
<ASSISTANT_TASK:>
Python Code:
path = untar_data(URLs.IMDB_SAMPLE)
df = pd.read_csv(path/'texts.csv')
df.head(2)
ss = L(list(df.text))
ss[0]
# Simplest tokenizer: split on *delim* and wrap the pieces in a fastcore `L` list.
def delim_tok(s, delim=' '): return L(s.split(delim))
s = ss[0]
delim_tok(s)
def apply(func, items):
    """Eagerly map *func* over *items* and return the results as a list."""
    return [func(item) for item in items]
%%timeit -n 2 -r 3
global t
t = apply(delim_tok, ss)
%%timeit -n 2 -r 3
parallel(delim_tok, ss, n_workers=2, progress=False)
batches32 = [L(list(o)).map(str) for o in np.array_split(ss, 32)]
batches8 = [L(list(o)).map(str) for o in np.array_split(ss, 8 )]
batches = [L(list(o)).map(str) for o in np.array_split(ss, 2 )]
%%timeit -n 2 -r 3
parallel(partial(apply, delim_tok), batches, progress=False, n_workers=2)
%%timeit -n 2 -r 3
global t
t = parallel(noop, batches, progress=False, n_workers=2)
# Benchmark probe: constant function, used to measure bare parallel-dispatch overhead.
def f(x): return 1
%%timeit -n 2 -r 3
global t
t = parallel(f, batches, progress=False, n_workers=2)
def f(items):
    # Benchmark probe: tokenize but deliberately discard the result (o is
    # unused), returning only a shallow copy of the input. This isolates the
    # cost of shipping results back across the process boundary from the
    # cost of tokenizing itself -- do not "fix" the unused variable.
    o = [s.split(' ') for s in items]
    return [s for s in items]
%%timeit -n 2 -r 3
global t
t = parallel(f, batches, progress=False, n_workers=2)
sarr = np.array(ss)
%%timeit -n 2 -r 3
global t
t = np.char.split(sarr)
from spacy.lang.en import English
# Convert a spaCy Doc's tokens to plain strings inside a fastcore `L`.
def conv_sp(doc): return L(doc).map(str)
class SpTok:
    """Callable spaCy tokenizer wrapper; constructing one is itself
    expensive (timed in the cell below), which is what the per-worker
    benchmarks exercise."""
    def __init__(self):
        nlp = English()
        self.tok = nlp.Defaults.create_tokenizer(nlp)
    # NOTE(review): maps conv_sp over the individual tokens rather than the
    # Doc itself -- confirm that is the intended nesting.
    def __call__(self, x): return L(self.tok(str(x))).map(conv_sp)
%%timeit -n 2 -r 3
SpTok()
nlp = English()
sp_tokenizer = nlp.Defaults.create_tokenizer(nlp)
# Tokenize with the module-level spaCy tokenizer, returning token strings in an `L`.
def spacy_tok(s): return L(sp_tokenizer(str(s))).map(str)
%%timeit -r 3
global t
t = apply(spacy_tok, ss)
%%timeit -r 3
global t
t = parallel(partial(apply, spacy_tok), batches, progress=False, n_workers=2)
%%timeit -r 3
global t
t = parallel(partial(apply, spacy_tok), batches8, progress=False, n_workers=8)
def f(its):
    # Child-process worker: build the tokenizer inside the worker (avoids
    # pickling it across the process boundary), then return nested lists
    # of token strings for the batch.
    tok = SpTok()
    return [[str(o) for o in tok(p)] for p in its]
%%timeit -r 3
global t
t = parallel(f, batches8, progress=False, n_workers=8)
%%timeit -r 3
global t
t = L(nlp.tokenizer.pipe(ss)).map(conv_sp)
# Worker variant using spaCy's batched `pipe` API over the items.
def f(its): return L(nlp.tokenizer.pipe(its)).map(conv_sp)
%%timeit -r 3
global t
t = parallel(f, batches8, progress=False, n_workers=8)
test_eq(chunked(range(12),n_chunks=4), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
test_eq(chunked(range(11),n_chunks=4), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]])
test_eq(chunked(range(10),n_chunks=4), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]])
test_eq(chunked(range( 9),n_chunks=3), [[0, 1, 2], [3, 4, 5], [6, 7, 8]])
%%timeit -r 3
global t
t = parallel_chunks(f, ss, n_workers=8, progress=False)
# Split *arr* into roughly n pieces by chunking at size floor(len(arr)/n).
def array_split(arr, n): return chunked(arr, math.floor(len(arr)/n))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lidando com filhos e outros descendentes
Step2: Lidando com irmãos
|
<ASSISTANT_TASK:>
Python Code:
from bs4 import BeautifulSoup
import requests
req = requests.get("http://pythonscraping.com/pages/page3.html")
bs = BeautifulSoup(req.text, "html.parser")
bs.find({"span"})
bs.findAll({"span"})
for filho in bs.find("table", {"id":"giftList"}).children:
print(filho)
for irmao in bs.find("table", {"id":"giftList"}).tr.next_siblings:
print(irmao)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next make a function that will reset the board, in this case we'll store values as a list.
Step2: Now create a function to display the board, I'll use the num pad as the board reference.
Step3: Define a function to check for a win by comparing inputs in the board list. Note
Step4: Define function to check if the board is already full in case of a tie. (This is straightfoward with our board stored as a list)
Step5: Now define a function to get player input and do various checks on it.
Step6: Now have a function that takes in the player's choice (via the ask_player function) then returns the game_state.
Step7: Finally put it all together in a function to play the game.
Step8: Let's play!
|
<ASSISTANT_TASK:>
Python Code:
# Specifically for the iPython Notebook environment for clearing output.
from IPython.display import clear_output
# Global variables
board = [' '] * 10
game_state = True
announce = ''
# Note: Game will ignore the 0 index
def reset_board():
global board,game_state
board = [' '] * 10
game_state = True
def display_board():
''' This function prints out the board so the numpad can be used as a reference '''
# Clear current cell output
clear_output()
# Print board
print " "+board[7]+" |"+board[8]+" | "+board[9]+" "
print "------------"
print " "+board[4]+" |"+board[5]+" | "+board[6]+" "
print "------------"
print " "+board[1]+" |"+board[2]+" | "+board[3]+" "
def win_check(board, player):
''' Check Horizontals,Verticals, and Diagonals for a win '''
if (board[7] == board[8] == board[9] == player) or \
(board[4] == board[5] == board[6] == player) or \
(board[1] == board[2] == board[3] == player) or \
(board[7] == board[4] == board[1] == player) or \
(board[8] == board[5] == board[2] == player) or \
(board[9] == board[6] == board[3] == player) or \
(board[1] == board[5] == board[9] == player) or \
(board[3] == board[5] == board[7] == player):
return True
else:
return False
def full_board_check(board):
''' Function to check if any remaining blanks are in the board '''
if " " in board[1:]:
return False
else:
return True
def ask_player(mark):
''' Asks player where to place X or O mark, checks validity '''
global board
req = 'Choose where to place your: ' + mark
while True:
try:
choice = int(raw_input(req))
except ValueError:
print("Sorry, please input a number between 1-9.")
continue
if board[choice] == " ":
board[choice] = mark
break
else:
print "That space isn't empty!"
continue
def player_choice(mark):
global board,game_state,announce
#Set game blank game announcement
announce = ''
#Get Player Input
mark = str(mark)
# Validate input
ask_player(mark)
#Check for player win
if win_check(board,mark):
clear_output()
display_board()
announce = mark +" wins! Congratulations"
game_state = False
#Show board
clear_output()
display_board()
#Check for a tie
if full_board_check(board):
announce = "Tie!"
game_state = False
return game_state,announce
def play_game():
    """Main game loop: alternate X and O turns until a win or tie,
    then offer a rematch (a rematch recurses into play_game)."""
    reset_board()
    global announce
    # Set marks
    X='X'
    O='O'
    while True:
        # Show board
        clear_output()
        display_board()
        # Player X turn
        game_state,announce = player_choice(X)
        print announce
        if game_state == False:
            break
        # Player O turn
        game_state,announce = player_choice(O)
        print announce
        if game_state == False:
            break
    # Ask player for a rematch
    rematch = raw_input('Would you like to play again? y/n')
    if rematch == 'y':
        play_game()
    else:
        print "Thanks for playing!"
# Start the first round immediately when the cell runs.
play_game()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Missão
Step2: Teste da Solução
|
<ASSISTANT_TASK:>
Python Code:
# Report the Python interpreter version used by this notebook.
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
class UniqueChars(object):
    """Determine whether a string consists of all-unique characters."""

    def has_unique_chars(self, string):
        """Return True if *string* contains no repeated characters.

        ``None`` is treated as invalid input and returns False; the empty
        string trivially has all-unique characters.
        """
        if string is None:
            return False
        # A string has unique characters iff deduplicating it keeps its length.
        return len(set(string)) == len(string)
%%writefile missao1.py
from nose.tools import assert_equal
class TestUniqueChars(object):
    """nose-based acceptance checks for a has_unique_chars(str) callable."""
    def test_unique_chars(self, func):
        # func is the candidate has_unique_chars implementation under test.
        assert_equal(func(None), False)
        assert_equal(func(''), True)
        assert_equal(func('foo'), False)
        assert_equal(func('bar'), True)
        print('Sua solução foi executada com sucesso! Parabéns!')
def main():
    test = TestUniqueChars()
    try:
        unique_chars = UniqueChars()
        test.test_unique_chars(unique_chars.has_unique_chars)
    except NameError:
        # UniqueChars is defined in the notebook namespace; skip if absent.
        pass
if __name__ == '__main__':
    main()
%run -i missao1.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We create a three-dimansional vector field with domain that spans between
Step2: Now, we can create a vector field object and initialise it so that
Step3: Please note, that in this case we provided a field value as a field argument, which internally calls a set method explained in other tutorials.
Step4: This vector field can now be saved in an OOMMF omf file, by using write_oommf_file method and providing a filename.
Step5: We can now see that, the OOMMF file is saved
Step6: Now when we have the OOMMF vector field file, we can read it, which will create a different Field object.
Step7: As expected, two fields must have exactly the same values at all nodes
Step8: Finally we can delete the OOMFF file used in this tutorial.
|
<ASSISTANT_TASK:>
Python Code:
from oommffield import Field, read_oommf_file
# Mesh domain corners (metres) and discretisation cell size.
cmin = (0, 0, 0)
cmax = (100e-9, 100e-9, 5e-9)
d = (5e-9, 5e-9, 5e-9)
# Field dimensionality: 3 components per node -> a vector field.
dim = 3
def m_init(pos):
    """Return the initial 3-vector field value at position ``pos``.

    ``pos`` is an (x, y, z) coordinate tuple; each returned component is a
    simple linear function of the coordinates.
    """
    x_c, y_c, z_c = pos
    return (x_c + 1, x_c + y_c + 2, z_c + 2)
# Create the vector field and initialise every node via m_init.
field = Field(cmin, cmax, d, dim=dim, value=m_init)
#PYTEST_VALIDATE_IGNORE_OUTPUT
%matplotlib inline
fig = field.plot_slice('z', 2.5e-9, xsize=8)
# Save the field as an OOMMF omf file, then list omf files to confirm.
filename = 'vector_field.omf'
field.write_oommf_file(filename)
!ls *.omf
# Round-trip: read the file back and verify all node values match.
field_read = read_oommf_file('vector_field.omf')
(field.f == field_read.f).all()
# Clean up the temporary file.
!rm vector_field.omf
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Pair Correlation Function (or Radial Distribution Function)
Step2: Compute!
Step3: Plot!
Step4: Save the everything for later
|
<ASSISTANT_TASK:>
Python Code:
# Load the trajectory (xyz) and the precomputed pairwise-distance table.
xyz = pd.read_hdf('xyz.hdf5', 'xyz')
twobody = pd.read_hdf('twobody.hdf5', 'twobody')
from scipy.integrate import cumtrapz
def pcf(A, B, a, twobody, dr=0.05, start=0.5, end=7.5):
    '''
    Pair correlation (radial distribution) function between two atom types.

    Args:
        A, B: atom-type symbols (e.g. 'O', 'H').
        a: cell edge length in the same unit as the distances, used to
           estimate the pair number density. Assumes a cubic cell --
           TODO confirm against the simulation setup.
        twobody: DataFrame with a 'symbols' column (concatenated pair
                 symbols such as 'OO'/'OH') and a 'distance' column.
        dr: histogram bin width.
        start, end: radial range of the histogram.

    Returns:
        DataFrame with columns ('g', 'n', 'r'): the pair correlation
        function g(r), the cumulative pair count n(r), and the bin-centre
        radii r. The column order matches the downstream plotting code,
        which relabels the columns positionally after pd.concat.
    '''
    distances = twobody.loc[(twobody['symbols'] == A + B) |
                            (twobody['symbols'] == B + A), 'distance'].values
    bins = np.arange(start, end, dr)
    bins = np.append(bins, bins[-1] + dr)
    hist, bins = np.histogram(distances, bins)
    # Bin-centre radii for each histogram bin.
    r = (bins[:-1] + bins[1:]) / 2
    # Average pair number density in the cell; normalising by the
    # ideal-gas shell population 4*pi*r^2*dr*rho makes g(r) -> 1 at
    # large r for an uncorrelated system.
    rho = len(distances) / a**3
    g = hist / (4 * np.pi * r**2 * dr * rho)
    # Cumulative number of pairs found within radius r.
    n = np.cumsum(hist)
    return pd.DataFrame.from_dict({'g': g, 'n': n, 'r': r})
# Load the reference pcf implementation from the snippets file (line magic).
%load -s pcf, snippets/pcf.py
A = 'O'
B = 'O'
df = pcf(A, B, a, twobody)
import seaborn as sns
sns.set_context('poster', font_scale=1.3)
sns.set_style('white')
sns.set_palette('colorblind')
# Lets modify a copy of the data for plotting
plotdf = df.set_index('r')
plotdf.columns = ['PCF', 'Pair Count']
# Generate the plot (pair count on a secondary y axis)
ax = plotdf.plot(secondary_y='Pair Count')
ax.set_ylabel('Pair Correlation Function ({0}, {1})'.format(A, B))
ax.right_ax.set_ylabel('Pair Count ({0}, {1})'.format(A, B))
ax.set_xlabel('Distance ($\AA$)')
# Merge the legends of the two y axes into a single boxed legend.
patches, labels = ax.get_legend_handles_labels()
patches2, labels2 = ax.right_ax.get_legend_handles_labels()
legend = ax.legend(patches+patches2, labels+labels2, loc='upper center', frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
# Compute the O-O, O-H and H-H correlation functions and merge them
# into one frame; the positional relabeling below assumes each pcf
# result has columns in the order (g, n, r).
df1 = pcf('O', 'O', a, twobody)
df2 = pcf('O', 'H', a, twobody)
df3 = pcf('H', 'H', a, twobody)
df = pd.concat((df1, df2, df3), axis=1)
df.columns = ['$g_{OO}$', '$n_{OO}$', '$r$', '$g_{OH}$', '$n_{OH}$', 'del1', '$g_{HH}$', '$n_{HH}$', 'del2']
# Drop the duplicated r columns that came along with df2/df3.
del df['del1']
del df['del2']
df.set_index('$r$', inplace=True)
ax = df.plot(secondary_y=['$n_{OO}$', '$n_{OH}$', '$n_{HH}$'])
ax.set_ylabel('Pair Correlation Function ($g_{AB}$)')
ax.right_ax.set_ylabel('Pairwise Count ($n_{AB}$)')
ax.set_xlabel('Distance ($\AA$)')
ax.set_ylim(0, 5)
ax.right_ax.set_ylim(0, 20)
patches, labels = ax.get_legend_handles_labels()
patches2, labels2 = ax.right_ax.get_legend_handles_labels()
legend = ax.legend(patches+patches2, labels+labels2, loc='upper right', frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
# Save the figure
fig = ax.get_figure()
fig.savefig('pcf.pdf')
# Save the pcf data
store = pd.HDFStore('pcf.hdf5', mode='w')
store.put('pcf', df)
store.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: We can see that the auto-covariance function starts at a high value and decreases quickly into a long noisy tail which fluctuates around zero. The high values at short lag times indicate that there are strong correlations at short time scales, as expected. However, even though the tail looks uninteresting, it can bear important information about the statistics of your data. Small systematic deviations from 0 in the tail can be a hint that long-term correlations exist in your system. On the other hand, if there is no sign of a systematic deviation from 0 in the tail, this usually means that the correlation is decaying well within the simulation time, and that the statistics are good enough to estimate an error. In the above example, the correlation quickly decays to zero. Despite the noise in the tail, the statistics seem very reasonable.
Step3: Since the auto-covariance function is very well matched with an exponential, this analysis already gives us a reasonable estimate of the autocorrelation time. Here we have the luxury to have an analytical ACF at hand which describes the statistics of the simple AR(1) process, which generated our simulation data. It is in fact exponential and agrees very well with the numerical ACF. In practice, however, you will neither know an analytical ACF, nor know if the ACF is exponential, at all. In many systems, the ACF is more or less exponential, but this is not necessarily the case.
Step4: In this plot, we have the analytical solution at hand, which is a luxury not present in real applications. For the analysis, we therefore need to act as if there was no analytic solution
Step5: Using this value of $j_\mathrm{max}$, we can calculate the integrated autocorrelation time $\hat{\tau}_{X, \mathrm{int}}$ and estimate the SEM with equation (5).
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
# Larger default font for the notebook figures.
plt.rcParams.update({'font.size': 18})
import sys
import logging
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
# Fixed seed so the generated AR(1) data sets are reproducible.
np.random.seed(43)
def ar_1_process(n_samples, c, phi, eps):
    '''
    Draw a correlated random sequence from an AR(1) process.

    Parameters
    ----------
    n_samples: :obj:`int`
        Number of samples to generate.
    c: :obj:`float`
        Constant term of the process.
    phi: :obj:`float`
        Correlation magnitude; must satisfy abs(phi) < 1.
    eps: :obj:`float`
        Standard deviation of the per-step shock.
    '''
    series = np.zeros(n_samples)
    if abs(phi) >= 1:
        raise ValueError("abs(phi) must be smaller than 1.")
    # Stationary mean and standard deviation of the AR(1) process; the
    # first value is drawn from the stationary distribution directly.
    stat_mean = c / (1 - phi)
    stat_std = np.sqrt(eps**2 / (1 - phi**2))
    series[0] = np.random.normal(loc=stat_mean, scale=stat_std)
    for step in range(1, n_samples):
        shock = np.random.normal(loc=0., scale=eps)
        series[step] = c + phi * series[step - 1] + shock
    return series
# generate simulation data using the AR(1) process
logging.info("Generating data sets for the tutorial ...")
N_SAMPLES = 100000
# Data set 1: moderate correlation -> short autocorrelation time.
C_1 = 2.0
PHI_1 = 0.85
EPS_1 = 2.0
time_series_1 = ar_1_process(N_SAMPLES, C_1, PHI_1, EPS_1)
# Data set 2: phi close to 1 -> very long autocorrelation time.
C_2 = 0.05
PHI_2 = 0.999
EPS_2 = 1.0
time_series_2 = ar_1_process(N_SAMPLES, C_2, PHI_2, EPS_2)
logging.info("Done")
fig = plt.figure(figsize=(10, 6))
plt.title("The first 1000 samples of both time series")
plt.plot(time_series_1[0:1000], label="time series 1")
plt.plot(time_series_2[0:1000], label="time series 2")
plt.xlabel("$i$")
plt.ylabel("$X_i$")
plt.legend()
plt.show()
# Numpy solution
# Auto-covariance R_j = <(X_i - mean)(X_{i+j} - mean)> for lags j < 1000.
time_series_1_centered = time_series_1 - np.average(time_series_1)
autocov = np.empty(1000)
for j in range(1000):
    autocov[j] = np.dot(time_series_1_centered[:N_SAMPLES - j], time_series_1_centered[j:])
autocov /= N_SAMPLES
fig = plt.figure(figsize=(10, 6))
plt.gca().axhline(0, color="gray", linewidth=1)
plt.plot(autocov)
plt.xlabel("lag time $j$")
plt.ylabel("$\hat{R}^{XX}_j$")
plt.show()
from scipy.optimize import curve_fit
def exp_fnc(x, a, b):
    # Exponential decay model used to fit the tail of the ACF.
    return a * np.exp(-x / b)
N_MAX = 1000
j = np.arange(1, N_MAX)
j_log = np.logspace(0, 3, 100)
popt, pcov = curve_fit(exp_fnc, j, autocov[1:N_MAX], p0=[15, 10])
# compute analytical ACF of AR(1) process
AN_SIGMA_1 = np.sqrt(EPS_1 ** 2 / (1 - PHI_1 ** 2))
AN_TAU_EXP_1 = -1 / np.log(PHI_1)
an_acf_1 = AN_SIGMA_1**2 * np.exp(-j / AN_TAU_EXP_1)
fig = plt.figure(figsize=(10, 6))
plt.plot(j, autocov[1:N_MAX], "x", label="numerical ACF")
plt.plot(j, an_acf_1, "-.", linewidth=3, label="analytical ACF")
plt.plot(j_log, exp_fnc(j_log, popt[0], popt[1]), label="exponential fit")
plt.xlim((1, N_MAX))
plt.xscale("log")
plt.xlabel("lag time $j$")
plt.ylabel("$\hat{R}^{XX}_j$")
plt.legend()
plt.show()
print(f"Exponential autocorrelation time: {popt[1]:.2f} sampling intervals")
# compute the ACF
# Normalised autocorrelation function rho_j = R_j / R_0.
acf = autocov / autocov[0]
# integrate the ACF (suffix _v for vectors)
# tau_int(j_max) = 1/2 + sum_{j=1}^{j_max} rho_j for every cutoff j_max.
j_max_v = np.arange(1000)
tau_int_v = np.zeros(1000)
for j_max in j_max_v:
    tau_int_v[j_max] = 0.5 + np.sum(acf[1:j_max + 1])
# plot
fig = plt.figure(figsize=(10, 6))
plt.plot(j_max_v[1:], tau_int_v[1:], label="numerical summing")
plt.plot(j_max_v[(1, -1),], np.repeat(AN_TAU_EXP_1, 2), "-.", label="analytical")
plt.xscale("log")
plt.xlabel(r"sum length $j_\mathrm{max}$")
plt.ylabel(r"$\hat{\tau}_{X, \mathrm{int}}$")
plt.legend()
plt.show()
C = 5.0
# determine j_max
# Self-consistent cutoff: the smallest j_max with j_max >= C * tau_int(j_max).
j_max = 0
while j_max < C * tau_int_v[j_max]:
    j_max += 1
# plot
fig = plt.figure(figsize=(10, 6))
plt.plot(j_max_v[1:], C * tau_int_v[1:])
plt.plot(j_max_v[1:], j_max_v[1:])
plt.plot([j_max], [C * tau_int_v[j_max]], "ro")
plt.xscale("log")
plt.ylim((0, 50))
plt.xlabel(r"sum length $j_\mathrm{max}$")
plt.ylabel(r"$C \times \hat{\tau}_{X, \mathrm{int}}$")
plt.show()
print(f"j_max = {j_max}")
tau_int = tau_int_v[j_max]
print(f"Integrated autocorrelation time: {tau_int:.2f} time steps\n")
# Effective (statistically independent) sample count: N / (2 tau_int).
N_eff = N_SAMPLES / (2 * tau_int)
print(f"Original number of samples: {N_SAMPLES}")
print(f"Effective number of samples: {N_eff:.1f}")
print(f"Ratio: {N_eff / N_SAMPLES:.3f}\n")
# Standard error of the mean from the variance and effective sample size.
sem = np.sqrt(autocov[0] / N_eff)
print(f"Standard error of the mean: {sem:.4f}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quiz 1 - Number of rainy days
Step3: count(*)
Step5: fog max(maxtempi)
Step7: More about SQL's CAST function
Step8: Quiz 5 - Fixing Turnstile Data
Step9: updated_turnstile_110528.txt
Step10: C/A,UNIT,SCP,DATEn,TIMEn,DESCn,ENTRIESn,EXITSn
Step11: More detail
Step12: 9 - Get Hourly Exits
Step13: Unnamed
Step14: Unnamed
|
<ASSISTANT_TASK:>
Python Code:
import pandas
import pandasql
def num_rainy_days(filename):
    '''
    Count the number of rainy days in the weather-underground CSV.

    Runs a SQL query (via pandasql) against the dataframe loaded from
    *filename* and returns a one-row, one-column dataframe holding the
    count of rows where the rain column equals 1.
    '''
    weather_data = pandas.read_csv(filename)
    # Bug fix: the query string lost its triple quotes (the bare SQL lines
    # were a syntax error); restored as a proper string literal.
    q = """
    SELECT COUNT(*)
    FROM weather_data
    WHERE rain = 1;
    """
    # Execute the SQL command against the pandas frame.
    rainy_days = pandasql.sqldf(q.lower(), locals())
    return rainy_days
import pandas
import pandasql
def max_temp_aggregate_by_fog(filename):
    '''
    Maximum max-temperature grouped by the fog indicator.

    Returns a two-row dataframe: fog value (0 or 1) and the maximum
    maxtempi recorded for that fog value.
    '''
    weather_data = pandas.read_csv(filename)
    # Bug fix: the query string lost its triple quotes; restored.
    q = """
    SELECT fog, MAX(maxtempi)
    FROM weather_data
    GROUP BY fog;
    """
    # Execute the SQL command against the pandas frame.
    foggy_days = pandasql.sqldf(q.lower(), locals())
    return foggy_days
import pandas
import pandasql
def avg_weekend_temperature(filename):
    '''
    Average mean temperature (meantempi) on weekends.

    strftime('%w', date) yields 0 for Sunday and 6 for Saturday, so the
    WHERE clause keeps exactly the weekend rows.
    '''
    weather_data = pandas.read_csv(filename)
    # Bug fix: the query string lost its triple quotes; restored.
    q = """
    SELECT AVG(CAST(meantempi AS int))
    FROM weather_data
    WHERE CAST(strftime('%w', date) AS int) = 0
       OR CAST(strftime('%w', date) AS int) = 6;
    """
    # Execute the SQL command against the pandas frame.
    mean_temp_weekends = pandasql.sqldf(q.lower(), locals())
    return mean_temp_weekends
import pandas
import pandasql
def avg_min_temperature(filename):
    '''
    Average minimum temperature (mintempi) on rainy days where the
    minimum temperature exceeds 55 degrees.
    '''
    weather_data = pandas.read_csv(filename)
    # Bug fix: the query string lost its triple quotes; restored.
    q = """
    SELECT AVG(CAST(mintempi AS int))
    FROM weather_data
    WHERE rain = 1 AND CAST(mintempi AS int) > 55;
    """
    # Execute the SQL command against the pandas frame.
    avg_min_temp_rainy = pandasql.sqldf(q.lower(), locals())
    return avg_min_temp_rainy
import csv
def fix_turnstile_data(filenames):
    """Split each multi-reading MTA turnstile row into one row per reading.

    For every input file ``name`` the reformatted rows are written to
    ``updated_<name>``. Each output row keeps the three identifier columns
    (C/A, UNIT, SCP) followed by exactly one five-field reading
    (date, time, description, entries, exits).
    """
    for src_name in filenames:
        dst_name = 'updated_' + src_name
        with open(src_name, 'r') as src, open(dst_name, 'w') as dst:
            reader = csv.reader(src)
            writer = csv.writer(dst)
            for record in reader:
                identifier = record[:3]
                # Readings are packed in groups of five after the identifier.
                for start in range(3, len(record), 5):
                    writer.writerow(identifier + record[start:start + 5])
    return None
def create_master_turnstile_file(filenames, output_file):
    """Concatenate the given turnstile files into *output_file*.

    A single header row is written first; the input files are assumed to
    contain data rows only (no header rows of their own).
    """
    header = 'C/A,UNIT,SCP,DATEn,TIMEn,DESCn,ENTRIESn,EXITSn\n'
    with open(output_file, 'w') as master_file:
        master_file.write(header)
        for part_name in filenames:
            with open(part_name, 'r') as source:
                # Append the file's rows verbatim after the header.
                master_file.write(source.read())
    return None
import pandas
def filter_by_regular(filename):
    """Read the turnstile CSV at *filename* and keep only REGULAR audit rows.

    Returns a pandas dataframe restricted to rows whose 'DESCn' column
    equals 'REGULAR'; original row indices are preserved.
    """
    frame = pandas.read_csv(filename)
    regular_mask = frame['DESCn'] == 'REGULAR'
    return frame[regular_mask]
import pandas
def get_hourly_entries(df):
    """Convert cumulative ENTRIESn counts to per-interval counts.

    Adds an 'ENTRIESn_hourly' column holding the difference between each
    row's ENTRIESn and the previous row's; the first row (which has no
    predecessor) is filled with 1. The dataframe is modified in place and
    also returned.
    """
    previous = df['ENTRIESn'].shift(1)
    df['ENTRIESn_hourly'] = (df['ENTRIESn'] - previous).fillna(1)
    return df
import pandas
def get_hourly_exits(df):
    """Convert cumulative EXITSn counts to per-interval counts.

    Adds an 'EXITSn_hourly' column holding the difference between each
    row's EXITSn and the previous row's; the first row (which has no
    predecessor) is filled with 0. The dataframe is modified in place and
    also returned.
    """
    previous = df['EXITSn'].shift(1)
    df['EXITSn_hourly'] = (df['EXITSn'] - previous).fillna(0)
    return df
import pandas
def time_to_hour(time):
    """Return the hour of a 'HH:MM:SS' timestamp as an int (0-23).

    The hour is taken from the first two characters, so '00' -> 0,
    '01' -> 1, '21' -> 21.
    """
    leading_pair = time[:2]
    return int(leading_pair)
import datetime
def reformat_subway_dates(date):
    """Convert an MTA 'mm-dd-yy' date into the weather-data 'YYYY-mm-dd' format.

    Uses strptime/strftime so the two-digit year is expanded according to
    the standard %y century rules.
    """
    parsed = datetime.datetime.strptime(date, '%m-%d-%y')
    return parsed.strftime('%Y-%m-%d')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Set Validation features
Step3: Step 2
Step4: Setup TensorFlow
Step5: Features and Labels
Step6: Question 1
Step7: Step 3
Step8: Question 7
Step9: Question 8
|
<ASSISTANT_TASK:>
Python Code:
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'train.p'
testing_file = 'test.p'
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# Each pickle holds a dict with image arrays under 'features' and
# class ids under 'labels'.
X_train, y_train = train['features'], train['labels']
X_test, y_test = test['features'], test['labels']
### Replace each question mark with the appropriate value.
# TODO: Number of training examples
n_train = len(X_train)
# TODO: Number of testing examples.
n_test = len(X_test)
# TODO: What's the shape of an traffic sign image?
image_shape = X_train[0].shape
# TODO: How many unique classes/labels there are in the dataset.
# NOTE(review): the class count is derived from the *test* labels, which
# is only correct if every class occurs in the test split -- consider
# using the union of train and test labels instead.
n_classes = len(set(y_test))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Data exploration visualization goes here.
import random
import numpy as np
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image, cmap="gray")
print(y_train[index])
### Preprocess the data here.
from sklearn.utils import shuffle
# Implement Min-Max scaling for image data
def normalize(image_data):
    """Min-max scale 8-bit pixel data from [0, 255] into [0.01, 0.99]."""
    out_lo = 0.01
    out_hi = 0.99
    in_lo = 0.0
    in_hi = 255.0
    # Same arithmetic order as the classic a + (x - min)(b - a)/(max - min).
    return out_lo + (((image_data - in_lo) * (out_hi - out_lo)) / (in_hi - in_lo))
# Normalize train features and test features
X_train = normalize(X_train)
X_test = normalize(X_test)
X_train, y_train = shuffle(X_train, y_train)
from sklearn.model_selection import train_test_split
# NOTE(review): the test set is merged into the training pool here and a
# fresh 2% validation split is carved out -- this leaks test data into
# training, so later "test accuracy" is optimistic; confirm intentional.
X_train = np.append(X_train, X_test, axis=0)
y_train = np.append(y_train, y_test, axis=0)
X_train, X_validation, y_train, y_validation = train_test_split(
    X_train,
    y_train,
    test_size=0.02,
    random_state=42)
from tensorflow.contrib.layers import flatten
import tensorflow as tf
# Checkpoint name and training hyperparameters.
model_name = 'lenet_report'
EPOCHS = 40
BATCH_SIZE = 120
def LeNet(x):
    """Build the LeNet-5 graph for 32x32x3 inputs and 43 sign classes.

    Returns the unscaled class logits tensor. Uses TensorFlow 1.x /
    tf.contrib APIs.
    """
    # Hyperparameters: weight-init distribution mean and stddev.
    mu = 0
    sigma = 0.01
    # NOTE(review): keep_prob is fixed at 1, so the dropout layer below is
    # effectively a no-op; wire it to a placeholder to enable dropout.
    keep_prob = 1
    # Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)
    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)
    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)
    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, keep_prob)
    # Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
# Graph inputs: image batch and integer labels (one-hot encoded below).
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
### Train your model here.
rate = 0.001
logits = LeNet(x)
# Softmax cross-entropy loss averaged over the batch, minimised with Adam.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Return the mean accuracy of the current model over (X_data, y_data).

    Runs the graph's global `accuracy_operation` batch by batch (batch size
    taken from the global BATCH_SIZE) and weights each batch's accuracy by
    its actual size, so a short final batch is handled correctly.
    """
    session = tf.get_default_session()
    n = len(X_data)
    weighted_correct = 0
    for start in range(0, n, BATCH_SIZE):
        stop = start + BATCH_SIZE
        xb = X_data[start:stop]
        yb = y_data[start:stop]
        batch_acc = session.run(accuracy_operation, feed_dict={x: xb, y: yb})
        weighted_correct += batch_acc * len(xb)
    return weighted_correct / n
# Train for EPOCHS epochs of minibatch SGD, reporting validation accuracy per
# epoch, then save the model. Relies on globals defined elsewhere in the file:
# X_train/y_train, X_validation/y_validation, EPOCHS, BATCH_SIZE, model_name,
# shuffle (presumably sklearn.utils.shuffle — TODO confirm).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        # Reshuffle once per epoch so minibatches differ between epochs.
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './models/'+model_name)
    print("Model saved")
# Reload the saved checkpoint in a fresh session and score the held-out test set.
with tf.Session() as sess:
    print ('loading '+model_name+'...')
    saver.restore(sess, './models/'+model_name)
    print('loaded')
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
# load test images
from skimage import io
import numpy as np
import os
# Filenames are sorted so they pair up with the hand-assigned labels below.
images = os.listdir("testImages/")
images.sort()
num_imgs = len(images)
test_imgs = np.uint8(np.zeros((num_imgs,32,32,3)))
# Expected class ids for each image, in sorted-filename order ('?' = unknown).
labels = ['?', 29, 28, 33, 5, 14, 18, 17, 34]
for i, j in enumerate(images):
    image = io.imread('./testImages/'+j)
    test_imgs[i] = image
# Normalize train features and test features
# NOTE(review): `normalize` is defined elsewhere in this file — confirm it
# matches the preprocessing used at training time.
test_imgs = normalize(test_imgs.reshape((-1, 32, 32, 3)).astype(np.float32))
import matplotlib.pyplot as plt
# Show all test images stacked vertically with tick labels hidden.
f, ax = plt.subplots(num_imgs, 1)
for i in range(num_imgs):
    ax[i].imshow(test_imgs[i])
    plt.setp(ax[i].get_xticklabels(), visible=False)
    plt.setp(ax[i].get_yticklabels(), visible=False)
plt.show()
test_imgs.shape
import tensorflow as tf
model_name = 'lenet_report'
# Softmax op over the graph's logits; used inside classify_images below.
predictions = tf.nn.softmax(logits)
def classify_images(X_data):
    """Return softmax class probabilities for X_data using the default session."""
    sess = tf.get_default_session()
    pred_vals = sess.run(predictions, feed_dict={x: X_data})
    return pred_vals
with tf.Session() as sess:
    print ('loading '+model_name+'...')
    saver.restore(sess, './models/'+model_name)
    # NOTE: this rebinds the module-level `predictions` from the softmax op to
    # the evaluated numpy array; tf.nn.top_k below therefore operates on the
    # concrete probabilities, not the graph op.
    predictions = classify_images(test_imgs)
    top_k = sess.run(tf.nn.top_k(predictions, 5, sorted=True))
    print("Predicted Labels:", np.argmax(predictions, 1))
    print("Expected Labels: ", labels)
# Bar chart of the top-5 softmax probabilities for each of the first 5 test
# images; x tick labels show the predicted class ids from top_k.
N = 5
ind = np.arange(N) # the x locations for the values
for i in range(5):
    plt.figure(i)
    values = top_k[0][i]  # top-5 probabilities for image i
    plt.bar(range(N), values, 0.40, color='g')
    plt.ylabel('Probabilities')
    plt.xlabel('Class Labels')
    plt.title('Top {} Softmax Probabilities for test-image{}'.format(N, str(i+1)))
    plt.xticks(ind+0.40, tuple(top_k[1][i]))  # label bars with class ids
    plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image and patch generation functions
Step2: Train a regression model to predict density
Step3: Plots for book
Step4: Actual image
|
<ASSISTANT_TASK:>
Python Code:
# Environment check: require a GPU, then set up imports and image/patch sizes
# for the simulated berry-counting task.
import tensorflow as tf
print(tf.version.VERSION)
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import scipy.ndimage as ndimage
from skimage import draw
# Full simulated image dimensions and channel count.
FULL_IMG_WIDTH = 512
FULL_IMG_HEIGHT = 512
IMG_CHANNELS = 3
# Label patches are 32x32; each input patch is the surrounding 64x64 window.
PATCH_WIDTH = 32
PATCH_HEIGHT = 32
INPUT_WIDTH = PATCH_WIDTH*2
INPUT_HEIGHT = PATCH_HEIGHT*2
def generate_image(with_intermediates=False):
    """Simulate one berry image plus its point-label map.

    Returns (img, label) by default, or (backgr, berries, img, label) when
    with_intermediates is True. `label` is a FULL_IMG-sized array with a 1 at
    each berry center; `img` is the background with red berry disks drawn in.
    """
    # the image has a random greenish background that is smoothed
    backgr = np.zeros([FULL_IMG_HEIGHT, FULL_IMG_WIDTH, IMG_CHANNELS])
    backgr[ np.random.rand(FULL_IMG_HEIGHT, FULL_IMG_WIDTH) < 0.3, 1 ] = 1
    backgr = ndimage.gaussian_filter(backgr, sigma=(5, 5, 0), order=0)
    # between 25 and 75 berries randomly placed
    num_berries = np.random.randint(25, 75)
    berry_cx = np.random.randint(0, FULL_IMG_WIDTH, size=num_berries)
    berry_cy = np.random.randint(0, FULL_IMG_HEIGHT, size=num_berries)
    label = np.zeros([FULL_IMG_WIDTH, FULL_IMG_HEIGHT])
    # cx is used as the row index; this is consistent throughout since the
    # image is square.
    label[berry_cx, berry_cy] = 1
    # draw the berries which are 10 px in radius
    berries = np.zeros([FULL_IMG_HEIGHT, FULL_IMG_WIDTH])
    for idx in range(len(berry_cx)):
        # NOTE(review): skimage.draw.circle was removed in skimage >= 0.19;
        # draw.disk((r, c), radius, shape=...) is the replacement — confirm
        # the pinned skimage version.
        rr, cc = draw.circle(berry_cx[idx], berry_cy[idx],
                             radius=10,
                             shape=berries.shape)
        berries[rr, cc] = 1
    # add berries to the background
    img = np.copy(backgr)
    img[berries > 0] = [1, 0, 0] # red
    if with_intermediates:
        print("{} berries simulated".format(num_berries))
        return backgr, berries, img, label
    else:
        return img, label
# Visual sanity check: show background, berry mask, composite image and label.
images = generate_image(True)
f, ax = plt.subplots(1, len(images), figsize=(15, 5))
for idx, img in enumerate(images):
    ax[idx].imshow(img)
    ax[idx].axis('off')
## given an image, get the patches
def get_patches(img, label, verbose=False):
    """Cut (img, label) into aligned training patches.

    Input patches are INPUT_HEIGHT x INPUT_WIDTH windows taken at
    PATCH-sized strides (so neighboring windows overlap); each label patch
    is the central, non-overlapping PATCH_HEIGHT x PATCH_WIDTH tile. Returns
    (patches, patch_labels) where patch_labels[i] is the berry count
    ("density") inside label patch i.
    """
    # Add batch (and, for the label, channel) dims expected by extract_patches.
    img = tf.expand_dims(img, axis=0)
    label = tf.expand_dims(tf.expand_dims(label, axis=0), axis=-1)
    if verbose:
        print(img.shape, label.shape)
    # (512 // 32)**2 == 256 patches per full image.
    num_patches = (FULL_IMG_HEIGHT // PATCH_HEIGHT)**2
    patches = tf.image.extract_patches(img,
                                       sizes=[1, INPUT_HEIGHT, INPUT_WIDTH, 1],
                                       strides=[1, PATCH_HEIGHT, PATCH_WIDTH, 1],
                                       rates=[1, 1, 1, 1],
                                       padding='SAME',
                                       name='get_patches')
    patches = tf.reshape(patches, [num_patches, -1])
    labels = tf.image.extract_patches(label,
                                      sizes=[1, PATCH_HEIGHT, PATCH_WIDTH, 1],
                                      strides=[1, PATCH_HEIGHT, PATCH_WIDTH, 1],
                                      rates=[1, 1, 1, 1],
                                      padding='VALID',
                                      name='get_labels')
    labels = tf.reshape(labels, [num_patches, -1])
    # the "density" is the number of points in the label patch
    patch_labels = tf.math.reduce_sum(labels, axis=[1], name='calc_density')
    if verbose:
        print(patches.shape, labels.shape, patch_labels.shape)
    return patches, patch_labels
# Getting input data
def create_dataset(num_full_images):
    """Build a tf.data.Dataset of (flattened patch, density) pairs.

    Each simulated full image yields 256 patches, generated lazily so images
    are created on demand rather than held in memory.
    """
    def generate_patches():
        for i in range(num_full_images):
            img, label = generate_image()
            patches, patch_labels = get_patches(img, label)
            # print(len(patches) * num_full_images)
            for patch, patch_label in zip(patches, patch_labels):
                yield patch, patch_label
    return tf.data.Dataset.from_generator(
        generate_patches,
        (tf.float32, tf.float32), # patch, patch_label
        (tf.TensorShape([INPUT_HEIGHT*INPUT_WIDTH*IMG_CHANNELS]),
         tf.TensorShape([]))
    )
# Smoke test: pull a few patches and print shape, density label and mean pixel.
trainds = create_dataset(1) # will create 256 patches per image
for img, label in trainds.take(3):
    avg = tf.math.reduce_mean(img) # avg pixel in image
    print(img.shape, label.numpy(), avg.numpy())
# Train
# Small sizes for a quick demo run; scale up for a realistic experiment.
NUM_TRAIN = 200 # 10000 more realistic
NUM_EVAL = 10 # 1000 more realistic
NUM_EPOCHS = 5
def training_plot(metrics, history):
    """Plot training (dashed) vs validation curves, one subplot per metric."""
    fig, axes = plt.subplots(1, len(metrics), figsize=(5 * len(metrics), 5))
    for axis, name in zip(axes, metrics):
        axis.plot(history.history[name], ls='dashed')  # training curve
        axis.set_xlabel("Epochs")
        axis.set_ylabel(name)
        axis.plot(history.history['val_' + name])      # validation curve
        axis.legend([name, 'val_' + name])
def train_and_evaluate(batch_size = 32,
                       lrate = 0.001, # default in Adam constructor
                       l1 = 0,
                       l2 = 0,
                       num_filters = 32):
    """Build, train and return a small convnet that regresses berry density.

    The model takes a flattened 64x64x3 patch, reshapes it back into an
    image, and predicts a single scalar density. l1/l2 regularize only the
    penultimate dense layer. Plots loss/mse/mae curves after training.
    """
    regularizer = tf.keras.regularizers.l1_l2(l1, l2)
    train_dataset = create_dataset(NUM_TRAIN).batch(batch_size)
    eval_dataset = create_dataset(NUM_EVAL).batch(64)
    # a simple convnet. you can make it more complex, of course
    # the patch is flattened, so we start by reshaping to an image
    model = tf.keras.Sequential([
        tf.keras.layers.Reshape([INPUT_HEIGHT, INPUT_WIDTH, IMG_CHANNELS],
                                input_shape=[INPUT_WIDTH * INPUT_HEIGHT * IMG_CHANNELS]),
        tf.keras.layers.Conv2D(num_filters, (3,3), activation='relu'),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(num_filters*2, (3,3), activation='relu'),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(num_filters*2, (3,3), activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(num_filters*2,
                              kernel_regularizer=regularizer,
                              activation=tf.keras.activations.relu),
        # Linear output: density is an unbounded non-negative regression target.
        tf.keras.layers.Dense(1, activation='linear')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lrate),
                  loss=tf.keras.losses.mean_squared_error,
                  metrics=['mse', 'mae'])
    print(model.summary())
    history = model.fit(train_dataset, validation_data=eval_dataset, epochs=NUM_EPOCHS)
    training_plot(['loss', 'mse', 'mae'], history)
    return model
model = train_and_evaluate()
## prediction.
def count_berries(model, img):
    """Estimate the berry count of one full image.

    Cuts the image into the same overlapping patches used at training time,
    predicts a density per patch, and sums the densities (each label patch is
    non-overlapping, so the sum approximates the total count).
    """
    num_patches = (FULL_IMG_HEIGHT // PATCH_HEIGHT)**2
    img = tf.expand_dims(img, axis=0)  # add batch dim for extract_patches
    patches = tf.image.extract_patches(img,
                                       sizes=[1, INPUT_HEIGHT, INPUT_WIDTH, 1],
                                       strides=[1, PATCH_HEIGHT, PATCH_WIDTH, 1],
                                       rates=[1, 1, 1, 1],
                                       padding='SAME',
                                       name='get_patches')
    patches = tf.reshape(patches, [num_patches, -1])
    densities = model.predict(patches)
    return tf.reduce_sum(densities)
# use an example image
# Compare actual vs predicted berry counts on a 4x4 grid of fresh simulations.
f, ax = plt.subplots(4, 4, figsize=(20, 20))
for idx in range(16):
    backgr, berries, img, label = generate_image(True)
    ax[idx//4, idx%4].imshow(img)
    ax[idx//4, idx%4].set_title("actual={:.1f} pred={:.1f}".format(
        tf.reduce_sum(label).numpy(),
        count_berries(model, img).numpy()
    ))
    ax[idx//4, idx%4].axis('off')
# OPTIONAL, CAN BE OMITTED
img, label = images = generate_image()
patches, labels = get_patches(img, label, verbose=True)
# display a few patches
f, ax = plt.subplots(4, 4, figsize=(20, 20))
for idx in range(16):
    r = np.random.randint(0, patches.shape[0])
    ax[idx//4, idx%4].imshow(tf.reshape(patches[r], [INPUT_HEIGHT, INPUT_WIDTH, IMG_CHANNELS]).numpy())
    ax[idx//4, idx%4].set_title("density={:.1f}".format(labels[r].numpy()))
    ax[idx//4, idx%4].axis('off')
# IPython shell magic: inspect the real photo's file type (notebook-only line).
!file berries.jpg
# Run the trained model on a real photograph, resized to the training size.
contents = tf.io.read_file('./berries.jpg')
img = tf.image.decode_image(contents)
img = tf.image.resize(img, [FULL_IMG_WIDTH, FULL_IMG_HEIGHT])
n = count_berries(model, img)
print(n.numpy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This doesn't give us any new information, so lets see what happens when we multiply the state vector again
Step2: Now, we have "simulated" the Markov chain twice, which tells us the weather probability in two days. What would happen if we multiplied our new vector by the matrix a large number of times?
Step3: You can see that for enough iterations we will eventually converge to a steady state ${\bf x}^* $, and multiplying this steady state by the Markov matrix will no longer modify the vector, i.e.
Step4: Run your power_iteration() function on M and a new vector,
Step5: As long as the starting state vector x is normalized (the entries add up to one), the steady state solution will be the same. There is one caveat to this statement, which we will discuss in the next section.
|
<ASSISTANT_TASK:>
Python Code:
# Advance the Markov chain one step at a time: each multiply by the
# transition matrix M gives the state distribution one day further out.
# (M and x are defined in an earlier notebook cell not shown here.)
x1 = M @ x
x1
x2 = M @ x1
x2
xc = x.copy()
# Write loop here
#grade (enter your code in this cell - DO NOT DELETE THIS LINE)
def power_iteration(M, x):
# Perform power iteration and return steady state vector xstar
xc = x.copy()
return xc
# Exercise power_iteration: first with a fixed normalized start, then with a
# random normalized start (the exercise claims the result is start-independent).
power_iteration(M, np.array([0.5, 0.5]))
# run this as many times as you want, the bottom vector should always stay the same!
random_vector = np.random.rand(2)
random_vector /= np.sum(random_vector) # normalize
print(random_vector)
print(power_iteration(M, random_vector))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Querying
Step2: A query can be printed as CQL or as XCQL
Step3: To search the database using this particular query, one needs to use the search method on a database object. This spits out a result set.
Step4: Handling the results
Step5: Each result in the result set refers to a particular recordStore in which, surprise surprise, the actual chapter is stored.
Step6: Understanding the results
Step7: From what I gather, a result in a resultSet is only a pointer to the document and not the document itself.
Step8: The get_dom(session) method spits out the record in parsed xml.
Step9: A record can be transformed into raw xml (in order to understand it), using
Step10: This could also be used in simple python string manipulations.
Step11: Transforming a result
Step12: Retrieving a chapter
Step13: Searching in a specific book
Step14: Messing around
Step15: Phrase search
Step16: And search
Step17: Or search
Step18: Proximity Information
Step19: Term highlighting
|
<ASSISTANT_TASK:>
Python Code:
# coding: utf-8
# Python 2 notebook: query a Cheshire3 full-text database of Dickens texts
# and inspect the result-set / record objects it returns.
import os
from cheshire3.baseObjects import Session
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
session = Session()
session.database = 'db_dickens'
serv = SimpleServer(session, os.path.join(cheshire3Root, 'configs', 'serverConfig.xml'))
db = serv.get_object(session, session.database)
qf = db.get_object(session, 'defaultQueryFactory')
resultSetStore = db.get_object(session, 'resultSetStore')
idxStore = db.get_object(session, 'indexStore')
# A CQL query with proximity information requested on the chapter index.
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" and/cql.proxinfo c3.chapter-idx = "fog"')
print query.toCQL()
print query.toXCQL()
result_set = db.search(session, query)
result_set
print len(result_set)
for result in result_set:
    print result
# Inspect the attributes of each result (note: the Cheshire3 attribute is
# literally spelled "occurences").
for result in result_set:
    print 'result.id: ', result.id
    print 'result.database: ', result.database
    print 'result.occurrences: ', result.occurences
    print 'result.proxInfo: ', result.proxInfo
    print "#########"
for result in result_set:
    print result.attributesToSerialize
# Each result is a pointer into a recordStore; fetch_record dereferences it.
for result in result_set:
    rec = result.fetch_record(session)
    print type(rec), rec
for result in result_set:
    rec = result.fetch_record(session)
    print "rec.id: ", rec.id
    print 'rec.wordCount: ', rec.wordCount
    print 'rec.get_dom(session): ', rec.get_dom(session)
    print "#######"
result_set.attributesToSerialize
result.attributesToSerialize
for result in result_set:
    print result.serialize(session)
# Raw-XML inspection and a quick string-based concordance example.
from lxml import etree
# NOTE(review): `rec2` is not defined in this chunk — presumably assigned in
# an earlier cell; confirm before rerunning.
rec_tostring = etree.tostring(rec2)
print rec_tostring
# find the first occurrence of the term love
# because that is what we are all looking for
love = rec_tostring.find('love')
conc_line = rec_tostring[love-50 : love + len('love') + 50]
conc_line.replace('love', 'LOVE')
# Transform a record into a keyword-in-context (KWIC) rendering.
kwicTransformer = db.get_object(session, 'kwic-Txr')
print kwicTransformer
doc = kwicTransformer.process_record(session, rec).get_raw(session)
print doc
from cheshire3.transformer import XmlTransformer
dctxr = db.get_object(session, 'kwic-Txr')
dctxr
# NOTE(review): `record` is not defined in this chunk either — confirm.
doc = dctxr.process_record(session, record)
print doc.get_raw(session)[:1000]
# Retrieve all chapters of one book (Little Dorrit, "LD"); result-set order
# follows chapter order, so index 0 is chapter 1.
query = qf.get_query(session, 'c3.book-idx = "LD"')
result_set = db.search(session, query)
chapter_1 = result_set[0]
chapter_44 = result_set[43]
chapter_1
rec = chapter_1.fetch_record(session).get_dom(session)
print rec
rec.attrib
rec.attrib['id']
type(rec)
print rec
doc = kwicTransformer.process_record(session, chapter_1.fetch_record(session)).get_raw(session)
print doc
articleTransformer = db.get_object(session, 'article-Txr')
doc = articleTransformer.process_record(session, chapter_1.fetch_record(session)).get_raw(session)
print doc
#FIXME How can you get immediately query for a chapter,
# rather than getting all chapters of a book first?
# --> you need to build a better index for this
# NOTE(review): this CQL string is missing '=' after c3.book-idx — likely why
# the query returns no usable results; confirm intended syntax.
query = qf.get_query(session, 'c3.book-idx "LD" and div.id = "LD.1"')
result_set = db.search(session, query)
len(result_set)
#TODO if recordStore's are unique AND they represent chapters, it could also be possible to simply
# get a particular recordStore from Cheshire (without querying the database again).
# Searching in a specific book: term + book filter.
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" and c3.chapter-idx = "fog" and c3.book-idx = "BH"')
result_set = db.search(session, query)
len(result_set)
# Phrase search with proximity information.
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and/cql.proxinfo c3.chapter-idx = "dense fog" \
                            ') #and c3.chapter-idx = "dense"')
rs = db.search(session, query)
len(rs)
for result in rs:
    print result.proxInfo
    #FIXME it seems that occurences cannot be trusted?
    print result.occurences
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and/cql.proxinfo c3.chapter-idx = "the" \
                            ')
query.addPrefix(query, 'test')
query.toCQL()
# 'proxinfo' without the 'cql.' prefix also works here.
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and/proxinfo c3.chapter-idx = "dense fog" \
                            ')
rs = db.search(session, query)
total = 0
for result in rs:
    total += len(result.proxInfo)
total
# AND search: both terms must appear in the chapter.
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and/cql.proxinfo c3.chapter-idx = "fog" \
                            and c3.chapter-idx = "dense"')
rs = db.search(session, query)
len(rs)
# OR search.
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and/cql.proxinfo c3.chapter-idx = "fog" \
                            or c3.chapter-idx = "dense"')
rs = db.search(session, query)
len(rs)
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and c3.book-idx = "LD"')
rs = db.search(session, query)
len(rs)
# Proximity operators: adjacent/unordered term pairs.
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and c3.chapter-idx = "he" prox/distance=1/unordered c3.chapter-idx = "said" \
                            or c3.chapter-idx = "did" or c3.chapter-idx = "wanted"')
rs = db.search(session, query)
len(rs)
#TODO not
#TODO wildcards
# Window searches: terms within N words of each other, ordered or not.
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and c3.chapter-idx window/distance<5/unordered "low voice"')
rs = db.search(session, query)
len(rs)
for result in rs:
    print result.proxInfo
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and c3.chapter-idx window/distance<5/unordered "voice low"')
rs = db.search(session, query)
len(rs)
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and c3.chapter-idx window/distance<5/unordered "low high"')
rs = db.search(session, query)
len(rs)
query = qf.get_query(session, 'c3.subcorpus-idx = "dickens" \
                            and c3.chapter-idx window/distance<3 "Mr Arthur said"')
rs = db.search(session, query)
len(rs)
# Proximity info inspection with 'all'/'any' relations.
query = qf.get_query(session, '(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.chapter-idx any "dense fog")')
result_set = db.search(session, query)
count = 0
for result in result_set:
    record = result.fetch_record(session)
    print result.occurences, record #wordCount #.process_xpath('//w[@o=%s]' % result.proxInfo[0][1])
    for y in result.proxInfo:
        print y
    count += 1
#TODO why does proxinfo only have three values here?
# --> because the last any does not have a proxinfo value
# Term highlighting transformer (imported for the next cell).
from cheshire3.transformer import LxmlQueryTermHighlightingTransformer
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The plot explains the above facts in a different representation.
Step2: The plot explains the distribution of survivors across age and class. More red on the lower part of the left swarm indicates that younger passengers in the third class had the least chance to survive.
Step3: The plot shows that male passengers had the least chance of survival and female passengers had the best chance of survival.
Step4: Same data with a different representation.
Step5: Plot showing distribution of fares among classes of travel. A first class ticket is about 4 times a second class ticket.
Step6: The plot shows differences in fares based on the point of embarkation.
|
<ASSISTANT_TASK:>
Python Code:
# Seaborn visualizations of the Titanic `train` DataFrame (loaded elsewhere).
# Survival rate by passenger class, split by sex.
sns.barplot(x='Pclass',y='Survived',data=train, hue='Sex')
# Same comparison, grouped the other way around.
sns.barplot(x='Sex',y='Survived',data=train, hue='Pclass')
# Age distribution of survivors vs non-survivors, colored by class.
sns.swarmplot(x='Survived',y='Age',hue='Pclass',data=train)
# Age distribution of survivors vs non-survivors, colored by sex.
sns.swarmplot(x='Survived',y='Age',hue='Sex',data=train)
# Age distribution by sex.
sns.swarmplot(x='Sex',y='Age',data=train)
# Mean fare per travel class.
sns.pointplot(x='Pclass',y='Fare',data=train)
# Mean fare by port of embarkation.
sns.barplot(x='Embarked',y='Fare',data=train)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: These data (~71 million rows) were taken from https
Step2: Function Definitions
Step3: Non-vectorized function
Step4: Non-vectorized string functions
Step5: Perfplot
Step6: Non-vectorized function speed comparison
Step7: Non-vectorized string function speed comparison
|
<ASSISTANT_TASK:>
Python Code:
# Benchmark setup: compare pandas / dask / swifter apply speeds on the Bay
# Area bike-share status dataset (~71M rows).
import numpy as np
import pandas as pd
import dask.dataframe as dd
import swifter
import perfplot
import matplotlib.pyplot as plt
import psutil
ncores = psutil.cpu_count()
npartitions = ncores*2  # two dask partitions per core
# NOTE(review): the feather load below is immediately overwritten by the CSV
# load — only one of the two lines is effective; confirm which source is
# intended before rerunning.
data = pd.read_feather("../../swifter_data/data/status")
data = pd.read_csv('status.csv')
data['date'] = pd.to_datetime(data['time'])
print(data.shape)
data.head()
def bikes_proportion(x, max_x):
    """Return x as a proportion of max_x (bikes available / max observed).

    Python 3 `/` is already true division, so the previous `* 1.0` float
    coercion was redundant and has been dropped; results are unchanged.
    Works element-wise when x is a Series/array (used by the benchmarks).
    """
    return x / max_x
def is_morning(datetime):
    """Return True when the timestamp falls in the morning, [06:00, 12:00).

    The parameter name shadows the `datetime` module, but only the `.hour`
    attribute is read, so any datetime/time-like object works. The explicit
    if/else returning True/False is replaced by the equivalent chained
    comparison.
    """
    return 6 <= datetime.hour < 12
def display_information(row):
    """Build a one-sentence status string for a single station record."""
    comparison = np.where(row['bikes_available'] > row['docks_available'], 'more', 'less')
    return (
        f"Station ID {row['station_id']} had {row['bikes_available']} bikes "
        f"available at {row['time']}. This is {comparison} than the number "
        f"of docks available ({row['docks_available']})."
    )
# Benchmark 1: a vectorizable function (bikes_proportion) applied via pandas
# apply, dask map_partitions, direct vectorized call, and swifter apply.
vectorized_comparison = perfplot.bench(
    setup=lambda n: data.loc[:n, 'bikes_available'],
    kernels=[
        lambda df: df.apply(bikes_proportion, max_x=np.max(data['bikes_available'])),
        lambda df: dd.from_pandas(df, npartitions=npartitions).map_partitions(bikes_proportion, max_x=np.max(data['bikes_available']), meta=('f8')).compute(scheduler='threads'),
        lambda df: bikes_proportion(df, max_x=np.max(data['bikes_available'])),
        lambda df: df.swifter.progress_bar(False).apply(bikes_proportion, max_x=np.max(data['bikes_available']))
    ],
    labels=['Pandas Apply', 'Dask Apply', 'Vectorized Function', 'Swifter Apply'],
    n_range=[2**k for k in range(0, 28, 2)],  # up to ~67M rows
    xlabel='n_rows'
)
# Linear-scale and log-log renderings of the same benchmark.
vectorized_comparison.title = "Vectorized Function Speed Comparison"
vectorized_comparison.xlabel = "n_rows"
vectorized_comparison.plot(logx=False, logy=False)
vectorized_comparison.xlabel = 'log10(n_rows)'
vectorized_comparison.title = 'Vectorized Function Speed Comparison [LogLog Plot]'
vectorized_comparison.plot(logx=True, logy=True, time_unit='s')
plt.ylabel("Runtime [log10(s)]")
# Benchmark 2: a non-vectorizable function (is_morning) over datetime values.
nonvectorized_comparison = perfplot.bench(
    setup=lambda n: data.loc[:n, 'date'],
    kernels=[
        lambda df: df.apply(is_morning),
        lambda df: dd.from_pandas(df, npartitions=npartitions).apply(is_morning, meta=(bool)).compute(scheduler='processes'),
        lambda df: df.swifter.progress_bar(False).apply(is_morning)
    ],
    labels=['Pandas Apply', 'Dask Apply', 'Swifter Apply'],
    n_range=[2**k for k in range(0, 28, 2)],
    xlabel='n_rows'
)
nonvectorized_comparison.title = "Non-Vectorized Function Speed Comparison"
nonvectorized_comparison.xlabel = "n_rows"
nonvectorized_comparison.plot(logx=False, logy=False)
nonvectorized_comparison.xlabel = 'log10(n_rows)'
nonvectorized_comparison.title = 'Non-Vectorized Function Speed Comparison [LogLog Plot]'
nonvectorized_comparison.plot(logx=True, logy=True, time_unit='s')
plt.ylabel("Runtime [log10(s)]")
def clean_text(text_row):
    """Strip surrounding whitespace from the row's text field and replace
    every space with an underscore."""
    return text_row["text"].strip().replace(' ', '_')
# Benchmark 3: a non-vectorized row-wise (axis=1) string-building function.
N_rows = 70000000
# NOTE(review): df_data is built here but the benchmark below operates on
# `data` instead — df_data appears unused; confirm intent.
df_data = pd.DataFrame([["i want to break free"]] * N_rows, columns=["text"])
nonvectorized_string_comparison = perfplot.bench(
    setup=lambda n: data.loc[:n],
    kernels=[
        lambda df: df.apply(display_information, axis=1),
        lambda df: dd.from_pandas(df, npartitions=npartitions).apply(display_information, axis=1, meta=(str)).compute(scheduler='processes'),
        lambda df: df.swifter.progress_bar(False).allow_dask_on_strings(True).apply(display_information, axis=1),
        lambda df: df.swifter.progress_bar(False).allow_dask_on_strings(False).apply(display_information, axis=1),
    ],
    labels=['Pandas Apply', 'Dask Apply', 'Swifter Dask Apply', 'Swifter Modin Apply'],
    n_range=[2**k for k in range(0, 28, 2)],
    xlabel='n_rows',
    equality_check=lambda left, right: left.equals(right)
)
# Render and save linear-scale and log-log plots of the benchmark.
fig = plt.figure()
nonvectorized_string_comparison.title = "Non-Vectorized String Function Speed Comparison"
nonvectorized_string_comparison.xlabel = "n_rows"
nonvectorized_string_comparison.plot(logx=False, logy=False)
plt.savefig("nonvectorized_string_axis1_speed_comparison.png")
fig = plt.figure()
nonvectorized_string_comparison.xlabel = 'log10(n_rows)'
nonvectorized_string_comparison.title = 'Non-Vectorized String Function Speed Comparison [LogLog Plot]'
nonvectorized_string_comparison.plot(logx=True, logy=True, time_unit='s')
plt.ylabel("Runtime [log10(s)]")
plt.savefig("nonvectorized_string_axis1_speed_comparison_loglog.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Deflator data
Step2: Per capita income data
Step3: Load Easterlin's data
|
<ASSISTANT_TASK:>
Python Code:
# Import the BEA API key from <home>/bea_api_key.txt, or fall back to None so
# it can be set manually. The first three '/'-separated components of the CWD
# ('', 'home', '<user>' on Unix) reconstruct the home directory.
try:
    items = os.getcwd().split('/')[:3]
    items.append('bea_api_key.txt')
    path = '/'.join(items)
    with open(path, 'r') as api_key_file:
        # strip() removes a trailing newline that would otherwise be embedded
        # in the request URLs built from api_key further down.
        api_key = api_key_file.readline().strip()
except Exception:
    # Best-effort: a missing/unreadable key file just leaves api_key unset.
    # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
    api_key = None
# Dictionary of state abbreviations
# Maps BEA GeoName strings to two-letter postal codes. 'Alaska *' and
# 'Hawaii *' carry a trailing asterisk because that is how the BEA API
# labels them — assumed intentional to match df.GeoName.replace below;
# TODO confirm against the API response.
stateAbbr = {
u'Alabama':u'AL',
u'Alaska *':u'AK',
u'Arizona':u'AZ',
u'Arkansas':u'AR',
u'California':u'CA',
u'Colorado':u'CO',
u'Connecticut':u'CT',
u'Delaware':u'DE',
u'District of Columbia':u'DC',
u'Florida':u'FL',
u'Georgia':u'GA',
u'Hawaii *':u'HI',
u'Idaho':u'ID',
u'Illinois':u'IL',
u'Indiana':u'IN',
u'Iowa':u'IA',
u'Kansas':u'KS',
u'Kentucky':u'KY',
u'Louisiana':u'LA',
u'Maine':u'ME',
u'Maryland':u'MD',
u'Massachusetts':u'MA',
u'Michigan':u'MI',
u'Minnesota':u'MN',
u'Mississippi':u'MS',
u'Missouri':u'MO',
u'Montana':u'MT',
u'Nebraska':u'NE',
u'Nevada':u'NV',
u'New Hampshire':u'NH',
u'New Jersey':u'NJ',
u'New Mexico':u'NM',
u'New York':u'NY',
u'North Carolina':u'NC',
u'North Dakota':u'ND',
u'Ohio':u'OH',
u'Oklahoma':u'OK',
u'Oregon':u'OR',
u'Pennsylvania':u'PA',
u'Rhode Island':u'RI',
u'South Carolina':u'SC',
u'South Dakota':u'SD',
u'Tennessee':u'TN',
u'Texas':u'TX',
u'Utah':u'UT',
u'Vermont':u'VT',
u'Virginia':u'VA',
u'Washington':u'WA',
u'West Virginia':u'WV',
u'Wisconsin':u'WI',
u'Wyoming':u'WY'
}
# List of states in the US
stateList = [s for s in stateAbbr]
# Obtain data from BEA
# --- Deflator: NIPA table T10109 gives the GDP price index (annual). ---
gdp_deflator = urlopen('http://apps.bea.gov/api/data/?UserID='+api_key+'&method=GetData&datasetname=NIPA&TableName=T10109&TableID=13&Frequency=A&Year=X&ResultFormat=JSON&')
# Parse result
result = gdp_deflator.read().decode('utf-8')
json_response = json.loads(result)
# Import to DataFrame and organize
df = pd.DataFrame(json_response['BEAAPI']['Results']['Data'])
df['DataValue'] = df['DataValue'].astype(float)
df = df.set_index(['LineDescription',pd.to_datetime(df['TimePeriod'])])
df.index.names = ['line description','Year']
# Extract price level data
# Divide by 100 so the base year's price level equals 1.0.
data_p = df['DataValue'].loc['Gross domestic product']/100
data_p.name = 'price level'
data_p = data_p.sort_index()
data_p
# Record the deflator's base year (parsed from the API note text) as metadata.
base_year = json_response['BEAAPI']['Results']['Notes'][0]['NoteText'].split('Index numbers, ')[-1].split('=')[0]
with open('../csv/state_income_metadata.csv','w') as newfile:
    newfile.write(',Values\n'+'base_year,'+base_year)
# Obtain data from BEA
# --- Per-capita income by state (SAINC1, LineCode 3), all years. ---
state_y_pc = urlopen('http://apps.bea.gov/api/data/?UserID='+api_key+'&method=GetData&DataSetName=Regional&TableName=SAINC1&LineCode=3&Year=ALL&GeoFips=STATE&ResultFormat=JSON')
# Parse result
result = state_y_pc.read().decode('utf-8')
json_response = json.loads(result)
# Import to DataFrame and organize
df = pd.DataFrame(json_response['BEAAPI']['Results']['Data'])
df.GeoName = df.GeoName.replace(stateAbbr)
df = df.set_index(['GeoName',pd.DatetimeIndex(df['TimePeriod'])])
df.index.names = ['State','Year']
df['DataValue'] = df['DataValue'].replace('(NA)',np.nan)
# Extract income data
# Strip thousands separators, pivot states to columns, and deflate to real terms.
data_y = df['DataValue'].str.replace(',','').astype(float)
data_y.name = 'income'
data_y = data_y.unstack('State')
data_y = data_y.sort_index()
data_y = data_y.divide(data_p,axis=0)
data_y
# Import Easterlin's income data
easterlin_data = pd.read_csv('../historic_data/Historical Statistics of the US - Easterlin State Income Data.csv',index_col=0)
# Import historic CPI data
# Rescale the historic CPI so 1929 matches the BEA price level in 1929.
historic_cpi_data=pd.read_csv('../historic_data/Historical Statistics of the US - cpi.csv',index_col=0)
historic_cpi_data = historic_cpi_data/historic_cpi_data.loc[1929]*float(data_p.loc['1929'])
# Construct series for real incomes in 1840, 1880, and 1900
df_1840 = easterlin_data['Income per capita - 1840 - A [cur dollars]']/float(historic_cpi_data.loc[1840])
# NOTE(review): the 1880 income series is deflated with the 1890 CPI value —
# confirm this year mismatch is intentional.
df_1880 = easterlin_data['Income per capita - 1880 [cur dollars]']/float(historic_cpi_data.loc[1890])
df_1900 = easterlin_data['Income per capita - 1900 [cur dollars]']/float(historic_cpi_data.loc[1900])
# Put into a DataFrame and concatenate with previous data beginning in 1929
df = pd.DataFrame({pd.to_datetime('1840'):df_1840,pd.to_datetime('1880'):df_1880,pd.to_datetime('1900'):df_1900}).transpose()
df = pd.concat([data_y,df]).sort_index()
# Export data to csv
# Drop non-contiguous states and BEA regional aggregates before export.
series = df.sort_index()
dropCols = [u'AK', u'HI', u'New England', u'Mideast', u'Great Lakes', u'Plains', u'Southeast', u'Southwest', u'Rocky Mountain', u'Far West']
for c in dropCols:
    series = series.drop([c],axis=1)
series.to_csv('../csv/state_income_data.csv',na_rep='NaN')
# Export notebook to .py
runProcs.exportNb('state_income_data')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Load CIFAR-10 data
Step7: Softmax Classifier
Step8: Sanity Check
Step10: Vectorized loss function
Step12: Stochastic Gradient Descent
|
<ASSISTANT_TASK:>
Python Code:
# CIFAR-10 softmax classifier: imports and default matplotlib settings.
import random
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.data_utils import load_CIFAR10
from cs231n.gradient_check import grad_check_sparse
# plotting setting
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the linear classifier. These are the same steps as we used for the
    SVM, but condensed to a single function.

    (Docstring quotes restored — the notebook export had stripped them,
    leaving bare text that is a syntax error.)

    Returns each split as a (D+1) x N matrix: images flattened to rows,
    zero-centered by the training-set mean, bias column appended, then
    transposed so each example is a column.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # subsample the data
    # Validation comes from the tail of the training range; fancy indexing
    # with range() makes copies, so the splits are independent.
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]
    # Preprocessing: reshape the image data into rows
    X_train = np.reshape(X_train, (X_train.shape[0], -1))
    X_val = np.reshape(X_val, (X_val.shape[0], -1))
    X_test = np.reshape(X_test, (X_test.shape[0], -1))
    # Normalize the data: subtract the mean image
    # The mean is computed on the training split only, then applied everywhere.
    mean_image = np.mean(X_train, axis = 0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    # add bias dimension and transform into columns
    X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]).T
    X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]).T
    X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]).T
    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
# Sanity-check the split shapes (expected: 3073 x 49000 / 1000 / 1000).
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape:', X_train.shape)
print('Train labels shape:', y_train.shape)
print('Validation data shape:', X_val.shape)
print('Validation labels shape:', y_val.shape)
print('Test data shape:', X_test.shape)
print('Test labels shape:', y_test.shape)
class LinearClassifier:
    """Linear classifier trained with minibatch SGD.

    Subclasses supply the loss; `W` holds the C x D weight matrix and is
    lazily initialized on the first call to train().
    """
    def __init__(self):
        # Weight matrix; None until train() initializes it from the data shape.
        self.W = None
def train(
self, X, y,
learning_rate=1e-3, reg=1e-5, num_iters=100, batch_size=200,
verbose=False, seed=None
):
Train this linear classifier using stochastic gradient descent.
Inputs:
- X: D x N array of training data. Each training point is a
D-dimensional column.
- y: 1-dimensional array of length N with labels 0...K-1 for K classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing.
- batch_size: (integer) number of training examples to use
at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training
iteration.
dim, num_train = X.shape
# assume y takes values 0...K-1 where K is number of classes
num_classes = np.max(y) + 1
if self.W is None:
# lazily initialize W
self.W = np.random.randn(num_classes, dim) * 0.001
batch_rs = np.random.RandomState(seed)
# Run stochastic gradient descent to optimize W
loss_history = []
for it in range(num_iters):
batch_ix = batch_rs.choice(
np.arange(num_train),
size=batch_size, replace=True
)
X_batch = X[:, batch_ix]
y_batch = y[batch_ix]
# evaluate loss and gradient, internally use self.W
loss, grad = self.loss(X_batch, y_batch, reg)
loss_history.append(loss)
# perform parameter update
self.W -= grad * learning_rate
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
return loss_history
def predict(self, X):
Use the trained weights of this linear classifier to predict labels for
data points.
Inputs:
- X: D x N array of training data. Each column is a D-dimensional point.
Returns:
- y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
array of length N, and each element is an integer giving the predicted
class.
score_pred = self.W.dot(X) # shape: C x N
y_pred = score_pred.argmax(axis=0)
return y_pred
def loss(self, X_batch, y_batch, reg):
Compute the loss function and its derivative.
Subclasses will override this.
Inputs:
- X_batch: D x N array of data; each column is a data point.
- y_batch: 1-dimensional array of length N with labels 0...K-1, for K classes.
- reg: (float) regularization strength.
Returns: A tuple containing:
- loss as a single float
- gradient with respect to self.W; an array of the same shape as W
raise NotImplementedError
def softmax_loss_naive(W, X, y, reg):
    """Softmax loss function, naive implementation (with loops).

    Inputs:
    - W: C x D array of weights
    - X: D x N array of data. Data are D-dimensional columns
    - y: 1-dimensional array of length N with labels 0...K-1, for K classes
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W, an array of same size as W
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)

    num_train = X.shape[1]
    f = W.dot(X)  # class scores, shape: C x N
    # p[i] = predicted probability of the true class of sample i.
    # Fix: `np.float` was removed in NumPy >= 1.24; use the builtin float.
    p = np.zeros(num_train, dtype=float)
    for i in range(num_train):
        f_i = f[:, i].copy()  # scores for sample i, shape C
        f_i -= np.max(f_i)    # shift by the max to improve numerical stability
        f_i = np.exp(f_i)

        x_i = X[:, i]
        all_class_p_i = f_i / np.sum(f_i)
        p[i] = all_class_p_i[y[i]]

        # Gradient contribution of sample i: (p - one_hot(y_i)) x_i^T,
        # accumulated with a minus sign and divided by -num_train below.
        # all_class_p_i is not used later, so no copy is needed.
        dw_x_weight_i = all_class_p_i
        dw_x_weight_i[y[i]] -= 1
        dW -= dw_x_weight_i[:, np.newaxis] * x_i[np.newaxis, :]

    # Average cross-entropy loss over the minibatch (after the loop, once p
    # is fully populated — inside the loop this would hit log(0)).
    loss += np.mean(-np.log(p))
    # Add L2 regularization.
    loss += 0.5 * reg * np.sum(W * W)

    # Finish the gradient: undo the accumulated minus sign, average, and add
    # the regularization gradient.
    # ref: http://ufldl.stanford.edu/wiki/index.php/Softmax_Regression
    dW /= -num_train
    dW += reg * W
    return loss, dW
%%time
# Generate a random softmax weight matrix and use it to compute the loss.
W = np.random.randn(10, 3073) * 0.0001
loss, grad = softmax_loss_naive(W, X_train, y_train, 0.0)
# As a rough sanity check, our loss should be something close to -log(0.1).
print('loss: %f' % loss)
print('sanity check: %f' % (-np.log(0.1)))
%%time
# Complete the implementation of softmax_loss_naive and implement a (naive)
# version of the gradient that uses nested loops.
loss, grad = softmax_loss_naive(W, X_train, y_train, 0.0)
# As we did for the SVM, use numeric gradient checking as a debugging tool.
# The numeric gradient should be close to the analytic gradient.
f = lambda w: softmax_loss_naive(w, X_train, y_train, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 5) # increase 5 here to check for more times
def softmax_loss_vectorized(W, X, y, reg):
    """Softmax loss function, vectorized version.

    Inputs and outputs are the same as softmax_loss_naive.
    (Fix: the docstring's triple-quote delimiters had been stripped.)
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)

    num_train = X.shape[1]
    _train_ix = np.arange(num_train)  # sample coordinates 0...N-1

    f = W.dot(X)             # class scores, shape: C x N
    f -= np.max(f, axis=0)   # per-column shift to improve numerical stability
    f = np.exp(f)
    p = f / np.sum(f, axis=0)  # class probabilities, shape: C x N

    # Loss: mean negative log-probability of the true classes, plus L2 reg.
    loss += np.mean(-np.log(p[y, _train_ix]))
    loss += 0.5 * reg * np.sum(W * W)

    # Gradient: (p - one_hot(y)) X^T, accumulated with a minus sign and
    # divided by -num_train, matching the naive implementation.
    # ref: http://ufldl.stanford.edu/wiki/index.php/Softmax_Regression
    dW_x_weight = p  # p is not used later, so no copy is needed
    dW_x_weight[y, _train_ix] -= 1
    # C x D -= (C x N) dot (N x D)
    dW -= dW_x_weight.dot(X.T)
    dW /= -num_train
    dW += reg * W
    return loss, dW
# Now that we have a naive implementation of the softmax loss function and its gradient,
# implement a vectorized version in softmax_loss_vectorized.
# The two versions should compute the same results, but the vectorized version should be
# much faster.
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_train, y_train, 0.00001)
toc = time.time()
print('naive loss: %e computed in %fs' % (loss_naive, toc - tic))
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_train, y_train, 0.00001)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# The losses should match but your vectorized implementation should be much faster.
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % grad_difference)
class Softmax(LinearClassifier):
    """A subclass that uses the Softmax + Cross-entropy loss function."""

    def loss(self, X_batch, y_batch, reg):
        # Delegate to the vectorized softmax loss, evaluated at the current W.
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
# Now implement SGD in LinearSVM.train() function and run it with the code below
softmax = Softmax()
tic = time.time()
loss_hist = softmax.train(
X_train, y_train,
learning_rate = 5e-8, reg=1e3,
# better params:
# learning_rate=5e-7, reg=5e4,
num_iters=1500, seed=9527, verbose=True
)
toc = time.time()
print('That took %fs' % (toc - tic))
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
y_train_pred = softmax.predict(X_train)
print('training accuracy: {:.3%}'.format(np.mean(y_train == y_train_pred)))
y_val_pred = softmax.predict(X_val)
print('validation accuracy: {:.3%}'.format(np.mean(y_val == y_val_pred)))
# evaluate on test set
# Evaluate the best svm on test set
y_test_pred = softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, ))
# Visualize the learned weights for each class
w = softmax.W[:,:-1] # strip out the bias
w = w.reshape(10, 32, 32, 3)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Customization basics
Step2: Tensors
Step3: Each tf.Tensor has a shape and a datatype
Step4: The most obvious differences between NumPy arrays and tf.Tensors are
Step5: GPU acceleration
Step6: Device names
Step8: Datasets
Step9: Apply transformations
Step10: Iterate
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
print(tf.math.add(1, 2))
print(tf.math.add([1, 2], [3, 4]))
print(tf.math.square(5))
print(tf.math.reduce_sum([1, 2, 3]))
# Operator overloading is also supported
print(tf.math.square(2) + tf.math.square(3))
x = tf.linalg.matmul([[1]], [[2, 3]])
print(x)
print(x.shape)
print(x.dtype)
import numpy as np
ndarray = np.ones([3, 3])
print("TensorFlow operations convert numpy arrays to Tensors automatically")
tensor = tf.math.multiply(ndarray, 42)
print(tensor)
print("And NumPy operations convert Tensors to NumPy arrays automatically")
print(np.add(tensor, 1))
print("The .numpy() method explicitly converts a Tensor to a numpy array")
print(tensor.numpy())
x = tf.random.uniform([3, 3])
print("Is there a GPU available: "),
print(tf.config.list_physical_devices("GPU"))
print("Is the Tensor on GPU #0: "),
print(x.device.endswith('GPU:0'))
import time
def time_matmul(x):
    """Multiply x by itself 10 times and print the elapsed wall time in ms."""
    start = time.time()
    for _ in range(10):
        tf.linalg.matmul(x, x)
    result = time.time() - start
    print("10 loops: {:0.2f}ms".format(1000 * result))
# Force execution on CPU
print("On CPU:")
with tf.device("CPU:0"):
x = tf.random.uniform([1000, 1000])
assert x.device.endswith("CPU:0")
time_matmul(x)
# Force execution on GPU #0 if available
if tf.config.list_physical_devices("GPU"):
print("On GPU:")
with tf.device("GPU:0"): # Or GPU:1 for the 2nd GPU, GPU:2 for the 3rd etc.
x = tf.random.uniform([1000, 1000])
assert x.device.endswith("GPU:0")
time_matmul(x)
# Build an in-memory dataset of six integers.
ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])

# Create a CSV file with three lines.
# Fix: the triple-quote delimiters of the multi-line string literal had been
# stripped, leaving bare text that is not valid Python.
import tempfile
_, filename = tempfile.mkstemp()

with open(filename, 'w') as f:
    f.write("""Line 1
Line 2
Line 3
""")

ds_file = tf.data.TextLineDataset(filename)

# Apply transformations: square, shuffle, and batch both datasets.
ds_tensors = ds_tensors.map(tf.math.square).shuffle(2).batch(2)
ds_file = ds_file.batch(2)

print('Elements of ds_tensors:')
for x in ds_tensors:
    print(x)

print('\nElements in ds_file:')
for x in ds_file:
    print(x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ## Resultados da regularização
|
<ASSISTANT_TASK:>
Python Code:
# Inicializacao
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
def nova_mlp(entradas, saidas, camadas):
    """Create a new MLP as a list of weight matrices.

    Args:
        entradas: number of input units.
        saidas: number of output units.
        camadas: list with the number of units in each hidden layer.

    Returns:
        List of weight matrices; matrix i maps layer i to layer i+1 and is
        initialized uniformly in [0, 1).
    """
    lista_de_camadas = [entradas] + camadas + [saidas]
    pesos = []
    # Fix: Python 2 `xrange` replaced by `range` for Python 3 compatibility.
    for i in range(len(lista_de_camadas) - 1):
        pesos.append(np.random.random((lista_de_camadas[i + 1], lista_de_camadas[i])))
    return pesos
def ff_mlp(entradas, pesos):
    """Forward pass of the MLP: tanh hidden layers, linear output layer.

    Args:
        entradas: input column(s), shape (n_inputs, n_samples).
        pesos: list of weight matrices as produced by `nova_mlp`.

    Returns:
        Output of the final linear layer, shape (n_outputs, n_samples).
    """
    s = entradas
    # Fix: Python 2 `xrange` replaced by `range` for Python 3 compatibility.
    for i in range(len(pesos) - 1):
        s = np.tanh(np.dot(pesos[i], s))
    s = np.dot(pesos[-1], s)
    return s
def backpropagation_step(entradas, saidas, pesos, regular=0.01, passo=0.01):
    """One backpropagation step with L1 regularization, updating `pesos` in place.

    Args:
        entradas: input columns, shape (n_inputs, n_samples).
        saidas: target columns, shape (n_outputs, n_samples).
        pesos: list of weight matrices (modified in place).
        regular: L1 regularization strength (lambda).
        passo: step size.

    Returns:
        The updated list of weight matrices (same object as `pesos`).
    """
    derivadas = []
    resultados_intermediarios = [entradas]

    # Forward pass, storing every intermediate activation for the backward pass.
    # Fix: Python 2 `xrange` replaced by `range` for Python 3 compatibility.
    s = entradas
    for i in range(len(pesos) - 1):
        s = np.tanh(np.dot(pesos[i], s))
        resultados_intermediarios.append(s)
    s = np.dot(pesos[-1], s)
    resultados_intermediarios.append(s)

    # Derivative of the error with respect to the estimated output.
    dedye = (resultados_intermediarios[-1] - saidas)
    # Derivative with respect to the final linear layer.
    dedb = np.dot(dedye, resultados_intermediarios[-2].T)

    # For each non-linear layer, backpropagate the derivative.
    deda = dedye
    for i in range(len(pesos) - 2, -1, -1):
        linear = np.dot(pesos[i], resultados_intermediarios[i])
        flz = (1 - np.tanh(linear) ** 2)  # tanh'(x) = 1 - tanh(x)^2
        deda = np.dot(pesos[i + 1].T, deda)  # deriv_front
        derivada = np.dot(deda * flz, resultados_intermediarios[i].T)
        derivadas.insert(0, derivada)
    derivadas.append(dedb)

    # Take a step against the (norm-scaled) gradient, plus the subgradient of
    # the L1 penalty weighted by `regular`.
    for i in range(len(derivadas)):
        n = np.linalg.norm(derivadas[i])
        pesos[i] -= passo * (derivadas[i] / n + (regular * np.sign(pesos[i])))
    return pesos
def erro(y, y_e):
    """Sum of squared errors between targets `y` and estimates `y_e`."""
    residuo = y - y_e
    return np.sum(residuo ** 2)
# Usando 30 amostras aleatoriamente escolhidas (30% do total) para o aprendizado
# e bias nas entradas
import random
x = np.linspace(-3, 3, num=100)
y_ = x**2
y = y_ + np.random.normal(0, 0.5, 100) # Criando uma parabola com ruido
x.shape = (1, x.size)
y.shape = (1, y.size)
x_treino = x[:,0:30]
x_teste = x[:,30:100]
y_treino = y[:,0:30]
y_teste = y[:,30:100]
x.shape = (x.size)
y.shape = (y.size)
train_array = np.zeros((x.size)).astype(bool)
test_array = np.ones((x.size)).astype(bool)
while np.sum(train_array) < 30:
n = int(random.random() * x.size)
test_array[n] = False
train_array[n] = True
x_treino = x[train_array]
x_teste = x[test_array]
y_treino = y[train_array]
y_teste = y[test_array]
x_treino.shape = (1,x_treino.size)
y_treino.shape = (1,y_treino.size)
x_teste.shape = (1,x_teste.size)
y_teste.shape = (1,y_teste.size)
x_treino2 = np.vstack((x_treino, np.ones(x_treino.size)))
x_teste2 = np.vstack((x_teste, np.ones(x_teste.size)))
mlp0 = nova_mlp(entradas=2, saidas=1, camadas=[10])
# Processo de treinamento
n_passos = 2000
eqm_treino = np.zeros((n_passos+1))
eqm_treino[0] = erro(y_treino, ff_mlp(x_treino2, mlp0))
eqm_teste = np.zeros((n_passos+1))
eqm_teste[0] = erro(y_teste, ff_mlp(x_teste2, mlp0))
for i in xrange(n_passos):
mlp0 = backpropagation_step(x_treino2, y_treino, mlp0, regular=0.3)
eqm_treino[i+1] = erro(y_treino, ff_mlp(x_treino2, mlp0))
eqm_teste[i+1] = erro(y_teste, ff_mlp(x_teste2, mlp0))
print "EQM final:", eqm_teste[-1]
plt.figure();
plt.plot(range(n_passos+1), eqm_treino);
plt.plot(range(n_passos+1), eqm_teste);
plt.ylabel('EQM');
plt.xlabel('Passos');
plt.title('EQM nos conjuntos de treino e teste');
plt.figure();
plt.plot(x.T, y.T);
plt.plot(x_treino.T, ff_mlp(x_treino2, mlp0).T);
plt.plot(x_teste.T, ff_mlp(x_teste2, mlp0).T);
plt.ylabel('Y');
plt.xlabel('X');
plt.title('Aproximacao nos conjuntos de teste e treino');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: We interact with the simulation through env. To show the simulation running, you can use env.render() to render one frame. Passing in an action as an integer to env.step will generate the next step in the simulation. You can see how many actions are possible from env.action_space and to get a random action you can use env.action_space.sample(). This is general to all Gym games. In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1.
Step3: To shut the window showing the simulation, use env.close().
Step4: The game resets after the pole has fallen past a certain angle. For each frame while the simulation is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right.
Step5: Experience replay
Step6: Exploration - Exploitation
Step7: Populate the experience memory
Step8: Training
Step9: Visualizing training
Step10: Testing
|
<ASSISTANT_TASK:>
Python Code:
import gym
import tensorflow as tf
import numpy as np
# Create the Cart-Pole game environment
env = gym.make('CartPole-v0')
env.reset()
rewards = []
for _ in range(100):
env.render()
state, reward, done, info = env.step(env.action_space.sample()) # take a random action
rewards.append(reward)
if done:
rewards = []
env.reset()
print(rewards[-20:])
class QNetwork:
    """Q-value network: two ReLU hidden layers and a linear output per action.

    Trains by regressing the Q-value of the taken action toward a supplied
    target Q-value with a squared-error loss, optimized with Adam.
    NOTE(review): built with the TF1 graph API (placeholders, tf.contrib
    layers); tf.contrib was removed in TensorFlow 2.x, so this requires
    TensorFlow 1.x — confirm the intended TF version.
    """
    def __init__(self, learning_rate=0.01, state_size=4,
                 action_size=2, hidden_size=10,
                 name='QNetwork'):
        # state inputs to the Q-network
        with tf.variable_scope(name):
            self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs')

            # One hot encode the actions to later choose the Q-value for the action
            self.actions_ = tf.placeholder(tf.int32, [None], name='actions')
            one_hot_actions = tf.one_hot(self.actions_, action_size)

            # Target Q values for training
            self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')

            # ReLU hidden layers
            self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size)
            self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size)

            # Linear output layer: one Q-value per action
            self.output = tf.contrib.layers.fully_connected(self.fc2, action_size,
                                                            activation_fn=None)

            ### Train with loss (targetQ - Q)^2
            # output has length 2, for two actions. This next line chooses
            # one value from output (per row) according to the one-hot encoded actions.
            self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)

            self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))
            self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
from collections import deque
class Memory():
    """Fixed-capacity experience replay buffer with uniform random sampling."""

    def __init__(self, max_size=1000):
        # The deque silently evicts the oldest experience once full.
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        """Append one experience tuple to the buffer."""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Return `batch_size` experiences drawn uniformly without replacement."""
        chosen = np.random.choice(np.arange(len(self.buffer)),
                                  size=batch_size,
                                  replace=False)
        return [self.buffer[index] for index in chosen]
train_episodes = 1000 # max number of episodes to learn from
max_steps = 200 # max steps in an episode
gamma = 0.99 # future reward discount
# Exploration parameters
explore_start = 1.0 # exploration probability at start
explore_stop = 0.01 # minimum exploration probability
decay_rate = 0.0001 # exponential decay rate for exploration prob
# Network parameters
hidden_size = 64 # number of units in each Q-network hidden layer
learning_rate = 0.0001 # Q-network learning rate
# Memory parameters
memory_size = 10000 # memory capacity
batch_size = 20 # experience mini-batch size
pretrain_length = batch_size # number experiences to pretrain the memory
tf.reset_default_graph()
mainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate)
# Initialize the simulation
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
memory = Memory(max_size=memory_size)
# Make a bunch of random actions and store the experiences
for ii in range(pretrain_length):
# Uncomment the line below to watch the simulation
# env.render()
# Make a random action
action = env.action_space.sample()
next_state, reward, done, _ = env.step(action)
if done:
# The simulation fails so no next state
next_state = np.zeros(state.shape)
# Add experience to memory
memory.add((state, action, reward, next_state))
# Start new episode
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
# Add experience to memory
memory.add((state, action, reward, next_state))
state = next_state
# Now train with experiences
saver = tf.train.Saver()
rewards_list = []
with tf.Session() as sess:
# Initialize variables
sess.run(tf.global_variables_initializer())
step = 0
for ep in range(1, train_episodes):
total_reward = 0
t = 0
while t < max_steps:
step += 1
# Uncomment this next line to watch the training
env.render()
# Explore or Exploit
explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step)
if explore_p > np.random.rand():
# Make a random action
action = env.action_space.sample()
else:
# Get action from Q-network
feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
Qs = sess.run(mainQN.output, feed_dict=feed)
action = np.argmax(Qs)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
total_reward += reward
if done:
# the episode ends so no next state
next_state = np.zeros(state.shape)
t = max_steps
print('Episode: {}'.format(ep),
'Total reward: {}'.format(total_reward),
'Training loss: {:.4f}'.format(loss),
'Explore P: {:.4f}'.format(explore_p))
rewards_list.append((ep, total_reward))
# Add experience to memory
memory.add((state, action, reward, next_state))
# Start new episode
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
# Add experience to memory
memory.add((state, action, reward, next_state))
state = next_state
t += 1
# Sample mini-batch from memory
batch = memory.sample(batch_size)
states = np.array([each[0] for each in batch])
actions = np.array([each[1] for each in batch])
rewards = np.array([each[2] for each in batch])
next_states = np.array([each[3] for each in batch])
# Train network
target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states})
# Set target_Qs to 0 for states where episode ends
episode_ends = (next_states == np.zeros(states[0].shape)).all(axis=1)
target_Qs[episode_ends] = (0, 0)
targets = rewards + gamma * np.max(target_Qs, axis=1)
loss, _ = sess.run([mainQN.loss, mainQN.opt],
feed_dict={mainQN.inputs_: states,
mainQN.targetQs_: targets,
mainQN.actions_: actions})
saver.save(sess, "checkpoints/cartpole.ckpt")
%matplotlib inline
import matplotlib.pyplot as plt
def running_mean(x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / N
eps, rews = np.array(rewards_list).T
smoothed_rews = running_mean(rews, 10)
plt.plot(eps[-len(smoothed_rews):], smoothed_rews)
plt.plot(eps, rews, color='grey', alpha=0.3)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
test_episodes = 10
test_max_steps = 400
env.reset()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
for ep in range(1, test_episodes):
t = 0
while t < test_max_steps:
env.render()
# Get action from Q-network
feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
Qs = sess.run(mainQN.output, feed_dict=feed)
action = np.argmax(Qs)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
if done:
t = test_max_steps
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
state = next_state
t += 1
env.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The distributions are
Step2: Next we load data from the Millennium Simulation and extract a $60 \times 60 \text{ arcmin}^2$ field of view.
Step3: Finally we sample luminosity and observed luminosity with the hyperparameters we drew above. Then we save the dataset.
Step4: Mass Prior
Step5: Next we use the python package hmf [3] to get the mass function (Tinker 2010) for our cosmology (WMAP).
Step6: The issue here is that the hmf distribution does not match the mass distribution from the millennium simulation. This makes sense for the bottom end of our mass range where there are likely many halos on the edge that the halo finder failed to recognize. The part that I am more concerned about is that the decay as we move to high mass is MUCH greater in the halo mass function than we see from the dataset.
Step7: Bias Distribution Test
Step8: To test our biased importance sampling distribution we randomly draw new hyperparameters.
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
Image(filename='pgm_mock_data.png')
from scipy.stats import norm
import numpy as np
np.random.seed(1)
alpha1 = norm(10.709, 0.022).rvs()
alpha2 = norm(0.359, 0.009).rvs()
alpha3 = 2.35e14
alpha4 = norm(1.10, 0.06).rvs()
S = norm(0.155, 0.0009).rvs()
sigma_L = 0.05
print ' alpha1 = {}\n alpha2 = {}\n alpha3 = {}\n alpha4 = {}\n S = {}\n sigma_L = {}'\
.format(alpha1, alpha2, alpha3, alpha4, S, sigma_L)
import pandas as pd
from massinference.angle import Angle
# opening, white-listing, renaming
usecols = ['GalID', 'pos_0[rad]', 'pos_1[rad]', 'M_Subhalo[M_sol/h]', 'z_spec']
guo = pd.read_table(
'/Users/user/Code/Pangloss/data/GGL_los_8_0_0_0_0_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63.images.txt',
usecols=usecols)
guo.rename(
columns={'GalID': 'gal_id',
'M_Subhalo[M_sol/h]': 'mass_h',
'z_spec': 'z'
},
inplace=True)
guo = guo[guo['mass_h'] > 0]
guo['mass_h'] = guo['mass_h'] * 0.73 #remove h^{-1} from mass units, use h from WMAP
# convert to arcmin
guo['ra'] = Angle.radian_to_arcmin(guo['pos_0[rad]'])
guo['dec'] = Angle.radian_to_arcmin(guo['pos_1[rad]'])
# field of view bounds
ra_i = guo['ra'].min()
dec_i = guo['dec'].min()
ra_f = ra_i + 40
dec_f = dec_i + 40
z_i = guo['z'].min()
z_f = guo['z'].max()
# clip data, fov = field of view
fov = guo[(guo['ra'] >= ra_i)
& (guo['ra'] < ra_f)
& (guo['dec'] >= dec_i)
& (guo['dec'] < dec_f)].copy(deep=True)
fov.drop(['pos_0[rad]', 'pos_1[rad]'], axis=1, inplace=True)
fov.columns
from scipy.stats import lognorm
mu_lum = np.exp(alpha1 + alpha2 * np.log(fov.mass_h / alpha3) + alpha4 * (1 + fov.z))
lum = lognorm(S, scale=mu_lum).rvs()
lum_obs = lognorm(sigma_L, loc=lum).rvs()
fov['lum'] = lum
fov['lum_obs'] = lum_obs
fov.to_csv('mock_data.csv')
from math import log
# NOTE: hmf uses units of M_{\odot}/h for mass
h = 0.73
Mmin = log((guo['mass_h'] / h).min()) / log(10)
Mmax = log((guo['mass_h'] / h).max()) / log(10)
print ' Mmin = {}\n Mmax = {}'\
.format(Mmin, Mmax)
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import rc
import hmf
from scipy.stats import rv_discrete
import scipy.interpolate as interpolate
rc('text', usetex=True)
mf = hmf.MassFunction(Mmin=Mmin, Mmax=Mmax, cosmo_model=hmf.cosmo.WMAP5, hmf_model=hmf.fitting_functions.Tinker10)
pdf = mf.dndm / sum(mf.dndm)
cum_values = np.cumsum(pdf)
inv_cdf = interpolate.interp1d(cum_values, mf)
plt.figure(figsize=(8,4))
plt.subplot(311)
plt.ylabel('Density')
plt.hist(np.log(fov.mass_h) / np.log(10), alpha=0.5, normed=True, label='millennium')
plt.plot(np.log(mf.m * h) /np.log(10), 50*pdf, color='red', label='hmf')
plt.legend()
plt.subplot(312)
plt.ylabel('Density')
ms_mass = np.log(fov.mass_h) / np.log(10)
ms_mass_cut = ms_mass[ms_mass >= 11.]
plt.hist(ms_mass_cut, alpha=0.5, normed=True, label='millennium')
cut_ind = 90
m_cut = (np.log(mf.m * h) /np.log(10))[cut_ind:]
pdf_cut = (1.85e3*pdf)[cut_ind:]
plt.plot(m_cut, pdf_cut, color='red', label='hmf')
plt.legend()
plt.subplot(313)
plt.xlabel('$\log_{10}M_{\odot}$')
plt.ylabel('Density')
ms_mass = np.log(fov.mass_h) / np.log(10)
ms_mass_cut = ms_mass[ms_mass >= 12.]
plt.hist(ms_mass_cut, alpha=0.5, normed=True, label='millennium')
cut_ind = 190
m_cut = (np.log(mf.m * h) /np.log(10))[cut_ind:]
pdf_cut = (2e5*pdf)[cut_ind:]
plt.plot(m_cut, pdf_cut, color='red', label='hmf')
plt.suptitle('PDF Cut Overlays')
plt.legend()
import scipy.interpolate as interpolate
class MassPrior():
    """Tabulated mass prior with linear interpolation.

    `pdf` linearly interpolates the per-bin probabilities between tabulated
    masses; `rvs` samples by inverse-CDF interpolation.
    """

    def __init__(self, mass, prob):
        """
        Args:
            mass: 1-D array of tabulated masses, sorted ascending.
            prob: 1-D array of probabilities for each mass (summing to 1).
        """
        self.mass = mass
        self.prob = prob
        self.min = mass.min()
        self.max = mass.max()
        # Have to add 0,1 samples for interpolation bounds: the padded masses
        # min-1 / max+1 give the inverse CDF a full [0, 1] domain.
        cumsum = np.append(np.append(np.array([0]), np.cumsum(prob)), np.array([1]))
        masses = np.append(np.append(np.array([self.min - 1]), self.mass), np.array([self.max + 1]))
        self.inv_cdf = interpolate.interp1d(cumsum, masses)

    def pdf(self, mass):
        """Linearly interpolated probability at `mass` (scalar or array)."""
        if np.any(mass < self.min) or np.any(mass > self.max):
            raise Exception('out of range')
        right_ind = np.searchsorted(self.mass, mass)
        left_ind = right_ind - 1
        # Fractional position inside the interval between tabulated masses.
        f = (mass - self.mass[left_ind]) / (self.mass[right_ind] - self.mass[left_ind])
        return f * self.prob[right_ind] + (1 - f) * self.prob[left_ind]

    def rvs(self, size=1, *args, **kwargs):
        """Draw `size` samples via inverse-CDF sampling.

        Bug fix: `size` was an undefined name inside the body (the original
        signature was ``rvs(self, *args, **kwargs)``), so any call raised
        NameError. It is now an explicit argument with a default; existing
        callers using ``rvs(size=n)`` keep working.
        """
        return self.inv_cdf(np.random.rand(size))
from scipy.stats import lognorm
mp = MassPrior(mf.m*h, mf.dndm / sum(mf.dndm))
def log_P_m(mass_h):
    """Log prior probability of a halo mass under the interpolated mass prior."""
    return np.log(mp.pdf(mass_h))

def log_P_l_given_m_z(mass_h, z, lum):
    """Log-likelihood of luminosity given mass and redshift.

    NOTE(review): mu_l is passed as lognorm's `loc` (a shift), whereas the
    mock-data generation used scale=exp(mu) — confirm which parameterization
    is intended.
    """
    mu_l = alpha1 + alpha2 * np.log(mass_h / alpha3) + alpha4 * (1+z)
    return lognorm(S, loc=mu_l).logpdf(lum)

def log_P_lobs_given_l(lum, lum_obs):
    """Log-likelihood of the observed luminosity given the true luminosity."""
    return lognorm(sigma_L, loc=lum).logpdf(lum_obs)

def log_weight(mass_h, lum, lum_obs, z):
    """Log importance weight: log prior plus both log-likelihoods.

    Fix: Python 2 `print` statements converted to `print()` calls — the rest
    of the file uses Python 3 syntax, so the old form was a syntax error.
    """
    print(log_P_m(mass_h))
    print(log_P_l_given_m_z(mass_h, z, lum))
    print(log_P_lobs_given_l(lum, lum_obs))
    return log_P_m(mass_h) + log_P_l_given_m_z(mass_h, z, lum) + log_P_lobs_given_l(lum, lum_obs)

def draw_m(size):
    """Draw `size` masses from the mass prior."""
    return mp.rvs(size=size)

def draw_lobs_given_l(lum):
    """Draw an observed luminosity given the true luminosity."""
    return lognorm(sigma_L, loc=lum).rvs()

def draw_l_given_m_z(m, z):
    """Draw a true luminosity given mass and redshift (forward model)."""
    mu_l = np.exp(alpha1 + alpha2 * np.log(m / alpha3) + alpha4 * (1+z))
    return lognorm(S, loc=mu_l).rvs()
np.random.seed(2)
alpha1b = norm(10.709, 0.022).rvs()
alpha2b = norm(0.359, 0.009).rvs()
alpha3b = 2.35e14
alpha4b = norm(1.10, 0.06).rvs()
Sb = norm(0.155, 0.0009).rvs()
sigma_Lb = 0.05
print ' alpha1b = {}\n alpha2b = {}\n alpha3b = {}\n alpha4b = {}\n Sb = {}\n sigma_Lb = {}'\
.format(alpha1b, alpha2b, alpha3b, alpha4b, Sb, sigma_Lb)
data = pd.read_csv('mock_data.csv')
def draw_biased_m_l(lobs, z):
    """Draw (mass, luminosity) from the biased proposal: invert the
    mass-luminosity relation at the perturbed hyperparameters (alpha*b).

    NOTE(review): lognorm is given `loc=` (a shift), while the forward model
    earlier in the file used `scale=` — confirm the intended parameterization.
    """
    l = lognorm(sigma_Lb, loc=lobs).rvs()
    # Invert mu_l = alpha1b + alpha2b*log(m/alpha3b) + alpha4b*(1+z) for m.
    mu_m = alpha3b * (l / (np.exp(alpha1b) * (1 + z) ** alpha4b)) ** (1 / alpha2b)
    m = lognorm(Sb, loc=mu_m).rvs()
    return (m,l)
nsamples = 100
# data
z_data = data['z'].as_matrix()[0:nsamples]
lobs_data = data['lum_obs'].as_matrix()[0:nsamples]
# biased samples
m_biased, l_biased = draw_biased_m_l(lobs_data, z_data)
# unbiased samples
m = draw_m(nsamples)
l = draw_l_given_m_z(m, z_data)
log_weight(m[0], l_biased[0], lobs_data[0], z_data[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: <img src="image/Mean_Variance_Image.png" style="height
Step6: Checkpoint
Step7: Problem 2
Step8: <img src="image/Learn_Rate_Tune_Image.png" style="height
Step9: Test
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
    """Download file from <url>, skipping the download if `file` already exists.

    (Fix: the docstring's triple-quote delimiters had been stripped, leaving
    bare text that is not valid Python.)

    :param url: URL to file
    :param file: Local file path
    """
    if not os.path.isfile(file):
        print('Downloading ' + file + '...')
        urlretrieve(url, file)
        print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """
    Uncompress features and labels from a zip file.

    :param file: The zip file to extract the data from
    :return: tuple of (features, labels) as NumPy arrays; one row/entry per
        image file in the archive.
    """
    features = []
    labels = []

    with ZipFile(file) as zipf:
        # Progress bar over archive members
        filenames_pbar = tqdm(zipf.namelist(), unit='files')

        # Get features and labels from all files
        for filename in filenames_pbar:
            # Check if the file is a directory
            if not filename.endswith('/'):
                with zipf.open(filename) as image_file:
                    image = Image.open(image_file)
                    image.load()
                    # Load image data as 1 dimensional array.
                    # We're using float32 to save on memory space.
                    feature = np.array(image, dtype=np.float32).flatten()

                # Get the letter from the filename. This is the letter of the image.
                label = os.path.split(filename)[1][0]

                features.append(feature)
                labels.append(label)
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [0.1, 0.9].

    :param image_data: The image data to be normalized (grayscale values in [0, 255])
    :return: Normalized image data as a NumPy array
    """
    # Vectorized min-max scaling: 0 -> 0.1, 255 -> 0.9.  Equivalent to the
    # per-element formula 0.1 + val * 0.8 / 255, but computed in one NumPy op.
    return 0.1 + np.asarray(image_data) * (0.8 / 255)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros([labels_count]))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 100
learning_rate = 0.3
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1 - Data Quality (6 pts)
Step2: (a) - Signal to Noise Ratio (SNR) (1 pt)
Step3: (b) - Explainable Variance (EV) (1 pt)
Step4: (c) - Test how SNR and EV depend on the number of repetitions (3 pts)
Step5: (c - continued) Questions
Step6: (d - continued) Question
Step7: Problem 2 - Gradient Descent (12 pts)
Step8: (a) Gradient Descent (3 pts)
Step9: (b) Coordinate Descent (3 pts)
Step10: (c) Gradient descent with early stopping (3 pts)
Step11: Explain what's going on in the error plots here.
Step12: Problem 3 - Regression
Step13: (b) Solve the (noiseless) 2-feature problem with ridge (4 pts)
Step14: Problem 4 - Regression on a high-dimensional problem (8 pts)
Step15: (a) How well could we possibly do at this problem? (3 pts)
Step16: ii. What is the minimum possible MSE on the training set and on the validation set? (1 pt)
Step17: iii. What would MSE be on the training and validation sets if all $\beta=0$? (1 pt)
Step18: (b) Solve the high-dimensional problem with OLS (1 pt)
Step19: Explain what the heck is going on here.
Step20: ii. Plot the training MSE and validation MSE as a function of lambda. Plot horizontal lines that show the theoretical minimum and maximum MSE (i.e. when beta=0) on the validation set, which you computed above. Explain what you see. (2 pts)
Step21: Explanation here.
|
<ASSISTANT_TASK:>
Python Code:
# Dependencies
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Make testing data
def q_make_data(signal_size, n_repeats, n_timepoints):
signal = np.random.randn(n_timepoints)
data = np.random.randn(n_repeats, n_timepoints) + (signal_size ** 0.5 * signal)
return data
q_data = q_make_data(signal_size=0.5, n_repeats=50, n_timepoints=300)
# The scenario: you've done 50 repeats of the same 300-second experiment,
# while measuring the output of 1 neuron
# q_data is a 50 x 300 matrix with the output at each second in each of the 50 repeats
print q_data.shape
def snr_func(data):
    """Estimate the signal-to-noise ratio (SNR) from repeated trials.

    :param data: 2D array, shape (n_repeats, n_timepoints); each row is one
        repeat of the same experiment.
    :return: ratio of estimated signal variance to estimated noise variance.
    """
    # Plug-in estimator: the trial-averaged response is the signal estimate,
    # and the per-trial residuals around that mean are the noise estimate.
    # NOTE(review): this is a simple biased estimator — the trial mean still
    # contains residual noise of variance sigma^2 / n_repeats; confirm against
    # the assignment's intended SNR definition.
    signal_est = data.mean(axis=0)
    noise_est = data - signal_est
    return signal_est.var() / noise_est.var()
print('Estimated SNR:', snr_func(q_data))
def ev_func(data):
    """Estimate the explainable variance (EV) from repeated trials.

    :param data: 2D array, shape (n_repeats, n_timepoints); each row is one
        repeat of the same experiment.
    :return: fraction of total response variance attributable to the
        repeatable (stimulus-driven) signal, in [0, 1].
    """
    # EV = var(signal) / var(data), using the trial mean as the signal
    # estimate.  NOTE(review): same plug-in bias caveat as snr_func — the
    # trial mean still contains noise of variance sigma^2 / n_repeats.
    signal_est = data.mean(axis=0)
    return signal_est.var() / data.var()
print('Estimated EV:', ev_func(q_data))
n_tests = 150
n_repeats = np.arange(5, 45, 5)
snr_estimates = np.array([[snr_func(q_make_data(signal_size=0.5, n_repeats=r, n_timepoints=300))
for _ in range(n_tests)]
for r in n_repeats])
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.errorbar(n_repeats, snr_estimates.mean(1), yerr=snr_estimates.std(1))
plt.xlabel('Number of repeats')
plt.ylabel('Estimated SNR')
plt.grid()
plt.subplot(1,2,2)
plt.plot(n_repeats, snr_estimates.std(1), 'o-')
plt.xlabel('Number of repeats')
plt.ylabel('Standard deviation of estimated SNR')
plt.grid();
ev_estimates = np.array([[ev_func(q_make_data(signal_size=0.5, n_repeats=r, n_timepoints=300))
for _ in range(n_tests)]
for r in n_repeats])
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.errorbar(n_repeats, ev_estimates.mean(1), yerr=ev_estimates.std(1))
plt.xlabel('Number of repeats')
plt.ylabel('Estimated EV')
plt.grid();
plt.subplot(1,2,2)
plt.plot(n_repeats, ev_estimates.std(1), 'o-')
plt.xlabel('Number of repeats')
plt.ylabel('Standard deviation of estimated EV')
plt.grid();
n_tests = 150
n_timepoints = np.arange(100, 500, 50)
snr_estimates = np.array([[snr_func(q_make_data(signal_size=0.3, n_repeats=50, n_timepoints=t))
for _ in range(n_tests)]
for t in n_timepoints])
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.errorbar(n_timepoints, snr_estimates.mean(1), yerr=snr_estimates.std(1))
plt.xlabel('Number of timepoints')
plt.ylabel('Estimated SNR')
plt.grid()
plt.subplot(1,2,2)
plt.plot(n_timepoints, snr_estimates.std(1), 'o-')
plt.xlabel('Number of timepoints')
plt.ylabel('Standard deviation of estimated SNR')
plt.grid();
ev_estimates = np.array([[ev_func(q_make_data(signal_size=0.3, n_repeats=50, n_timepoints=t))
for _ in range(n_tests)]
for t in n_timepoints])
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.errorbar(n_timepoints, ev_estimates.mean(1), yerr=ev_estimates.std(1))
plt.xlabel('Number of timepoints')
plt.ylabel('Estimated EV')
plt.grid();
plt.subplot(1,2,2)
plt.plot(n_timepoints, ev_estimates.std(1), 'o-')
plt.xlabel('Number of timepoints')
plt.ylabel('Standard deviation of estimated EV')
plt.grid();
# Insert code for answering the bonus question here, if you want!
# Make testing data
def gd_make_data(nsamp=100, noise=0):
    """Simulate a two-pixel correlated stimulus and a linear neural response.

    :param nsamp: number of time points to generate
    :param noise: standard deviation of additive Gaussian response noise
    :return: (x, y) where x is a (2, nsamp) stimulus matrix (rows are
        dimensions, columns are time points) and y is the (nsamp,) response
        of the toy neuron with true weights [1, 7].
    """
    # First pixel: standard-normal samples.
    pixel_one = np.random.randn(nsamp)
    # Second pixel: correlated with the first (0.4 shared, 0.6 independent).
    pixel_two = 0.4 * pixel_one + 0.6 * np.random.randn(nsamp)
    # Stack into a stimulus matrix.
    stimulus = np.vstack([pixel_one, pixel_two])

    # Ground-truth weights on each channel.
    true_weights = np.array([1, 7])
    # Response of the system, with optional additive noise.
    response = stimulus.T.dot(true_weights) + noise * np.random.randn(nsamp)
    return stimulus, response
x, y = gd_make_data()
# Plot timeseries
plt.plot(x[0])
plt.plot(x[1])
plt.plot(y);
# We are going to pretend we don't know h and make a search for h values by settting up
# a range of potential values for h1 and h2
b1, b2 = np.meshgrid(np.arange(-1, 10, .2), np.arange(-1, 10, .2))
bs = np.vstack([b1.ravel(), b2.ravel()])
# get responses from each set of weights
ys = np.dot(x.T, bs)
# calculate error between the response, y, and each of the possible responses, ys.
errfun = np.sum((y[:,None] - ys) ** 2, 0)
# reshape for plotting
errfun = errfun.reshape(b1.shape)
## plot contour of error surface. Note the shape of the surface is angled
# because the two variable are correlated.
plt.figure(figsize=(5,5))
plt.contour(b1, b2, errfun, 50, colors='k', alpha=0.3);
plt.axis('equal');
# Gradient descent!
steps = 100 # how many steps to take
eps = 0.001 # the size of each step
b_est = np.array([0.0, 0.0]) # store your current estimate of beta here
b_est_history = np.zeros([steps+1, 2]) # assume b_est_history[0] = result before you started
for ii in range(steps):
## YOUR CODE HERE ##
## plot contour of error surface and your regression path
plt.figure(figsize=(5,5))
plt.contour(b1, b2, errfun, 50, colors='k', alpha=0.3)
## YOUR CODE HERE ##
plt.axis('equal');
# Coordinate descent!
steps = 100 # how many steps to take
eps = 0.001 # the size of each step
b_est = np.array([0.0, 0.0])
b_est_history = np.zeros([steps+1, 2])
for ii in range(steps):
## YOUR CODE HERE ##
## plot contour of error surface and your regression path
plt.figure(figsize=(5,5))
plt.contour(b1, b2, errfun, 50, colors='k', alpha=0.3)
## YOUR CODE HERE ##
plt.axis('equal');
heldout_data = np.load('gd-heldout.npz')
trnx = heldout_data['trnx']
trny = heldout_data['trny']
valx = heldout_data['valx']
valy = heldout_data['valy']
# Gradient descent!
steps = 100
eps = 0.001
b_est = np.array([0.0, 0.0])
b_est_history = np.zeros([steps+1, 2])
trn_err_history = np.zeros([steps])
val_err_history = np.zeros([steps])
for ii in range(steps):
## YOUR CODE HERE ##
## plot the training and validation error as a function of step number
plt.figure()
## YOUR CODE HERE ##
plt.legend();
print('Best step in held-out set:', val_err_history.argmin(), 'Weights:', b_est_history[val_err_history.argmin()])
print('Where gradient descent ended up:', b_est_history[-1])
## plot the betas along the way
plt.figure(figsize=(5,5))
plt.contour(b1, b2, errfun, 50, colors='k', alpha=0.3)
## YOUR CODE HERE ##
plt.axis('equal');
# Coordinate descent!
steps = 100
eps = 0.001
b_est = np.array([0.0, 0.0])
b_est_history = np.zeros([steps+1, 2])
trn_err_history = np.zeros([steps])
val_err_history = np.zeros([steps])
for ii in range(steps):
## YOUR CODE HERE ##
## plot the training and validation error as a function of step number
plt.figure()
## YOUR CODE HERE ##
plt.legend();
print('Best step in held-out set:', val_err_history.argmin())
print(b_est_history[val_err_history.argmin()])
print(b_est_history[-1])
## plot the beta path
plt.figure(figsize=(5,5))
plt.contour(b1, b2, errfun, 50, colors='k', alpha=0.3)
## YOUR CODE HERE ##
plt.axis('equal');
beta_ols = ## YOUR CODE HERE ##
plt.figure(figsize=(5,5))
plt.contour(b1, b2, errfun, 50, colors='k', alpha=0.3)
## YOUR CODE HERE ##
plt.axis('equal');
lambdas = np.logspace(0, 4, 10)
betas_ridge = np.zeros((len(lambdas), 2))
for ii in range(len(lambdas)):
## YOUR CODE HERE ##
## Plot the ridge solutions on the error contours
plt.figure(figsize=(5,5))
plt.contour(b1, b2, errfun, 50, colors='k', alpha=0.3)
## YOUR CODE HERE ##
plt.axis('equal');
# Generate high-dimensional data
n_features = 400 # the number of features
n_timepoints = 600 # the number of timepoints
n_training = 450 # the number of timepoints that we'll use for training
noise_level = 5.0 # how much noise to add
# generate the "true" betas, the ones that will be used to generate the data
beta_true = np.random.randn(n_features)
# generate the feature matrix, x
# this uses a trick to make the different features in x be pretty correlated
u,s,vh = np.linalg.svd(np.random.randn(n_timepoints, n_features), full_matrices=False)
x_all = (u*(s**5)).dot(vh)
x_all /= x_all.max()
# generate the responses, y = x . beta + noise
y_all = x_all.dot(beta_true) + np.random.randn(n_timepoints) * noise_level
# split x and y into training part (first n_training timepoints) ..
x = x_all[:n_training]
y = y_all[:n_training]
# .. and validation part (remaining timepoints)
x_val = x_all[n_training:]
y_val = y_all[n_training:]
# plot y, let's see what it looks like
plt.plot(y_all);
def mean_squared_error(z, z_hat):
    """Return the mean squared error between targets `z` and predictions `z_hat`.

    :param z: array of true values
    :param z_hat: array of predicted values, same shape as `z`
    :return: scalar mean of the squared element-wise differences
    """
    return np.mean((np.asarray(z) - np.asarray(z_hat)) ** 2)
best_trn_mse = ## YOUR CODE HERE ##
best_val_mse = ## YOUR CODE HERE ##
print('Best possible MSE on training set:')
print(best_trn_mse)
print('Best possible MSE on validation set:')
print(best_val_mse)
betazero_trn_mse = ## YOUR CODE HERE ##
betazero_val_mse = ## YOUR CODE HERE ##
print('MSE on training set with beta=0:')
print(betazero_trn_mse)
print('MSE on validation set with beta=0:')
print(betazero_val_mse)
beta_ols = ## YOUR CODE HERE ##
y_hat = ## YOUR CODE HERE ##
y_val_hat = ## YOUR CODE HERE ##
print('Training MSE:', mean_squared_error(y, y_hat))
print('Validation MSE:', mean_squared_error(y_val, y_val_hat))
lambdas = np.logspace(-3, 5, 10) # let's check 10 lambdas between 10^-3 and 10^5. play with this range if you like
betas_ridge = np.zeros((len(lambdas), n_features))
trn_mse = np.zeros(len(lambdas))
val_mse = np.zeros(len(lambdas))
for ii in range(len(lambdas)):
## YOUR CODE HERE ##
## YOUR CODE HERE ##
## YOUR CODE HERE ##
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description
Step2: SOLUTION
Step3: Thus the load impedance referred to the primary side is
Step4: The referred secondary current is $I_s' = \frac{V_P}{Z_{EQ} Z_L'}$
Step5: and the referred secondary voltage is $V_s' = I_s' Z_L'$
Step6: The actual secondary voltage is thus
Step7: And the voltage regulation with $VR = \frac{V_P-V_S'}{V_S'}$ is therefore
Step8: (b)
Step9: Thus the load impedance referred to the primary side is
Step10: The referred secondary current is $I_s' = \frac{V_P}{Z_{EQ} X_L'}$
Step11: and the referred secondary voltage is $V_s' = I_s' X_L'$
Step12: The actual secondary voltage is thus
Step13: And the voltage regulation with $VR = \frac{V_P-V_S'}{V_S'}$ is therefore
|
<ASSISTANT_TASK:>
Python Code:
%pylab notebook
%precision 4
Zeq = 20.0 + 100.0j # [Ohm]
Rc = 100.0e3 # [Ohm]
Xm = 20.0e3 # [Ohm]
Zload = 2.0 + 0.7j # [Ohm]
Xload = -3.0j # [Ohm]
Vp = 7967.0 # [V]
a = 8000/230.0
a
Zloadp = a**2 * Zload
Zloadp
Isp = Vp/ (Zeq + Zloadp)
print('Isp = {:.3f} A ∠{:.2f}°'.format(
abs(Isp), angle(Isp, deg=True)))
Vsp = Isp * Zloadp
print('Vsp = {:.1f} V ∠{:.2f}°'.format(
abs(Vsp), angle(Vsp, deg=True)))
Vs = Vsp /a
print('Vs = {:.1f} V {:.2f}°'.format(
abs(Vs), angle(Vs, deg=True)))
print('===================')
VR = (abs(Vp) - abs(Vsp)) / abs(Vsp) * 100
print('VR = {:.2f} %'.format(VR))
print('===========')
a # already defined earlier...
Xloadp = a**2 * Xload
Xloadp
Isp = Vp/ (Zeq + Xloadp)
print('Isp = {:.3f} A ∠{:.2f}°'.format(
abs(Isp), angle(Isp, deg=True)))
Vsp = Isp * Xloadp
print('Vsp = {:.2f} V ∠{:.2f}°'.format(
abs(Vsp), angle(Vsp, deg=True)))
Vs = Vsp /a
print('Vs = {:.1f} V ∠{:.2f}°'.format(
abs(Vs), angle(Vs, deg=True)))
print('====================')
VR = (abs(Vp) - abs(Vsp)) / abs(Vsp) * 100
print('VR = {:.2f} %'.format(VR))
print('============')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Why is this a bad idea?
Step2: We are going to
Step3: Example
Step4: Example
Step5: Getting a copy
Step6: Advanced operations
Step7: This has the raw read count data. However, each sample gets a different number of reads, so we want to normalize by the library size, which is the total number of reads across a column.
Step8: Exercise
Step9: However, NumPy will automatically prepend singleton dimensions until the array shapes match or there is an error
Step10: We also multiply by $10^6$ in order to keep the numbers on a readable scale (reads per million reads).
Step11: Finally, longer genes are more likely to produce reads. So we normalize by the gene length (in kb) to produce a measure of expression called Reads Per Kilobase per Million reads (RPKM).
Step12: Exercise
Step13: Exercise
Step14: Exercise
Step16: Exercise
Step17: Fancy indexing
Step19: Exercise
Step20: Advanced exercise Jack's dilemma
Step21: Even more advanced
|
<ASSISTANT_TASK:>
Python Code:
gene0 = [100, 200, 50, 400]
gene1 = [50, 0, 0, 100]
gene2 = [350, 100, 50, 200]
expression_data = [gene0, gene1, gene2]
import numpy as np
a = np.array(expression_data)
print(a)
def print_info(a):
    """Print a summary of a NumPy array: size, ndim, shape, dtype, strides, flags."""
    for label, attr in (('number of elements', 'size'),
                        ('number of dimensions', 'ndim'),
                        ('shape', 'shape'),
                        ('data type', 'dtype'),
                        ('strides', 'strides')):
        print(label + ':', getattr(a, attr))
    print('flags:')
    print(a.flags)
print_info(a)
print(a.data)
abytes = a.ravel().view(dtype=np.uint8)
print_info(abytes)
print(abytes[:24])
print_info(a)
print_info(a.T)
print_info(a.T)
print_info(a.T[::2])
print_info(a.T[::2, ::2])
b = a
print(b)
a[0, 0] = 5
print(b)
a[0, 0] = 100
expr = np.load('expr.npy')
print_info(expr)
lib_size = np.sum(expr, axis=0)
print(expr.shape)
print(lib_size.shape)
print(lib_size[np.newaxis, :].shape)
np.all(expr / lib_size ==
expr / lib_size[np.newaxis, :])
expr_lib = expr / lib_size
expr_lib *= 1e6
gene_len = np.load('gene-lens.npy')
print(gene_len.shape)
rpkm = expr_lib # FIX THIS
from matplotlib import pyplot as plt
from scipy import stats
def plot_col_density(data, xlim=None, *args, **kwargs):
    """Overlay Gaussian-KDE density curves, one per column of `data`.

    Extra positional/keyword arguments are forwarded to `plt.plot`.
    """
    # One kernel-density estimate per column (Gaussian smoothing).
    kdes = [stats.kde.gaussian_kde(column) for column in data.T]

    # Evaluation grid: the supplied x-range, or the full data range.
    if xlim is not None:
        lo, hi = xlim
    else:
        lo, hi = np.min(data), np.max(data)
    grid = np.linspace(lo, hi, 100)

    plt.figure()
    for kde in kdes:
        plt.plot(grid, kde(grid), *args, **kwargs)
    plt.xlabel('log-counts')
    plt.ylabel('frequency')
    if xlim is not None:
        plt.xlim(xlim)
    plt.show()
%matplotlib inline
plt.style.use('ggplot')
plot_col_density(np.log(expr+1))
plot_col_density(np.log(rpkm + 1), xlim=(0, 250))
x = np.random.rand(3, 5)
y = np.random.randint(10, size=8)
z = x # FIX THIS
def repeat(arr, n):
    """Return a zero-copy view presenting `arr` stacked `n` times along a new
    leading axis (stride 0, so all rows alias the same memory)."""
    view_shape = (n,) + arr.shape
    view_strides = (0,) + arr.strides
    return np.lib.stride_tricks.as_strided(arr, shape=view_shape,
                                           strides=view_strides)
repeat(np.random.rand(5), 4)
def sliding_window(arr, size=2):
    """Produce an array of sliding window views of `arr`.

    Parameters
    ----------
    arr : 1D array, shape (N,)
        The input array.
    size : int, optional
        The size of the sliding window.

    Returns
    -------
    arr_slide : 2D array, shape (N - size + 1, size)
        The sliding windows of size `size` of `arr`.

    Examples
    --------
    >>> a = np.array([0, 1, 2, 3])
    >>> sliding_window(a, 2)
    array([[0, 1],
           [1, 2],
           [2, 3]])
    """
    n_windows = arr.shape[0] - size + 1
    stride = arr.strides[0]
    # Zero-copy view: consecutive windows start one element apart, so both
    # axes step by the original element stride.
    return np.lib.stride_tricks.as_strided(arr, shape=(n_windows, size),
                                           strides=(stride, stride))
# test your code here
sliding_window(np.arange(8), 3)
values = np.array([0, 5, 99])
selector = np.random.randint(0, 3, size=(3, 4))
print(selector)
print(values[selector])
def qnorm(x):
    """
    Quantile normalize an input matrix.

    Each column is forced to have the same distribution: the value at rank r
    in every column is replaced by the mean of the rank-r values across all
    columns.  (Ties are broken arbitrarily by argsort order.)

    Parameters
    ----------
    x : 2D array of float, shape (M, N)
        The input data, with each column being a
        distribution to normalize.

    Returns
    -------
    xn : 2D array of float, shape (M, N)
        The normalized data.
    """
    # Reference distribution: mean across columns of the column-sorted data.
    rank_means = np.sort(x, axis=0).mean(axis=1)
    # Rank of each entry within its own column (double argsort).
    ranks = np.argsort(np.argsort(x, axis=0), axis=0)
    # Map each entry to the reference value at its rank.
    return rank_means[ranks]
logexpr = np.log(expr + 1)
logrpkm = np.log(rpkm + 1)
logexprn = qnorm(logexpr)
logrpkmn = qnorm(logrpkm)
plot_col_density(logexprn)
plot_col_density(logrpkmn, xlim=(0, 0.25))
# "data"
ni, nj, nk = (10, 15, 20)
amplitude = np.random.rand(ni, nj, nk)
horizon = np.random.randint(5, 15, size=(ni, nj))
import numpy as np
from mutable_str import MutableString
s = MutableString('abcde')
# --- EDIT THIS SECTION ---
# Create an array interface to this foreign object
s.__array_interface__ = {'data' : (XXX, False), # (ptr, is read_only?)
'shape' : XXX,
'typestr' : '|u1', # typecode unsigned character
}
# --- EDIT THIS SECTION ---
print 'String before converting to array:', s
sa = np.asarray(s)
print 'String after converting to array:', sa
sa += 2
print 'String after adding "2" to array:', s
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The most basic usage of set and get
Step2: We can override the existing value by calling set method for the same key
Step3: It is also possible to pass an expiration value to the key by using setex method
Step4: A dictionary can be inserted like this
Step5: To get multiple keys' values, we can use mget. If a non-existing key is also passed, Redis return None for that key
|
<ASSISTANT_TASK:>
Python Code:
import redis
r = redis.Redis(decode_responses=True)
r.ping()
r.set("full_name", "john doe")
r.exists("full_name")
r.get("full_name")
r.set("full_name", "overridee!")
r.get("full_name")
r.setex("important_key", 100, "important_value")
r.ttl("important_key")
dict_data = {
"employee_name": "Adam Adams",
"employee_age": 30,
"position": "Software Engineer",
}
r.mset(dict_data)
r.mget("employee_name", "employee_age", "position", "non_existing")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's evaluate a single path of 2 lights and 1 wand, assuming a waiting threshold of 0 seconds. That is, he will not consider how many seconds he would need to wait: he will automatically use the magic wand even if he only needs to wait 1 second.
Step2: Now, let's optimize this, let's see which would be the optimal value he needs to wait to use the magic wand. For this we compute all the possible waiting times from (0 to 80) and see where the optimal value is.
Step3: One can see that the problem presents a multidimensional optimization problem, being the dimension the number of traffic lights minus 1. Now, let's optimize the case for 3 traffic lights
|
<ASSISTANT_TASK:>
Python Code:
MAX_TIME = 80. # max time waiting at traffic light
class TrafficLightPath:
'''Class that computes the probabilities of a traffic light path over itself and the
future (children) traffic lights.
'''
p = 0 # probability of this path
T = 0 # expected time of this path
Nw = 0 # remaining uses of the magic wand
Nl = 0 # remaining traffic lights
Lvec = [] # vector of thresholds when waiting for the red light
childrenPaths = [] # array of TrafficLightPath with the future path
def __init__ (self, p, T, Nw, Nl, Lvec):
'''Creates the current path and recursively compute the path
'''
self.p = p
self.T = T
self.Nw = Nw
self.Nl = Nl
self.Lvec = [float(x) for x in Lvec]
self.childrenPaths = []
if self.getCurrentPosInPath()==-1:
self.p = 1
self.T = 0
if Nl > 0:
self.computeChildrenPaths ()
def computeChildrenPaths (self):
'''Creates the future path possibilities
'''
if self.Nw >= self.Nl:
# no need to wait, use magic wand
self.childrenPaths.append(TrafficLightPath(1, 0, self.Nw-1, self.Nl-1, self.Lvec))
else:
# don't have a magic wand
if self.Nw == 0:
# green light
self.childrenPaths.append(TrafficLightPath(0.5, 0, self.Nw, self.Nl-1, self.Lvec))
# red light
self.childrenPaths.append(TrafficLightPath(0.5, MAX_TIME/2 , self.Nw, self.Nl-1, self.Lvec))
else:
# can decide whether to wait or stay
# green ligth
self.childrenPaths.append(TrafficLightPath(0.5, 0, self.Nw, self.Nl-1, self.Lvec))
# wait, don't use wand
L = self.Lvec[self.getCurrentPosInPath()+1]
self.childrenPaths.append(TrafficLightPath(L/(2*MAX_TIME), L/2, self.Nw, self.Nl-1, self.Lvec))
# don't wait, use wand
self.childrenPaths.append(TrafficLightPath((MAX_TIME-L)/(2*MAX_TIME), 0, self.Nw-1, self.Nl-1, self.Lvec))
def getCurrentPosInPath(self):
'''Returns the current position in the path
'''
return len(self.Lvec)-self.Nl-1
def printPath (self):
'''Prints the path and future paths with indentation
'''
pos = self.getCurrentPosInPath()+2
print '-'*4*pos + ' pos=' + str(pos-2)
print '-'*4*pos + ' p=' + str(self.p)
print '-'*4*pos + ' T=' + str(self.T)
print '-'*4*pos + ' Nw=' + str(self.Nw)
print '-'*4*pos + ' Nl=' + str(self.Nl)
print '-'*4*pos + ' L=' + str(self.Lvec[pos-2])
print '-'*4*pos
for item in self.childrenPaths:
item.printPath()
def computeMeanWaitingTimes(self, a_total_time=None, prob_path=1):
    '''Accumulate the probability-weighted waiting time of every node.

    Appends p(path) * p(node) * T(node) for this node, then recurses
    into the children with the updated path probability.

    a_total_time -- list to append into; a fresh list is created when
                    omitted.  (Fix: the previous ``a_total_time=[]``
                    mutable default was shared across calls and
                    silently accumulated results between invocations.)
    prob_path    -- probability of reaching this node's parent.
    Returns the (possibly newly created) list of weighted times.
    '''
    if a_total_time is None:
        a_total_time = []
    a_total_time.append(self.p*prob_path*self.T)  # total time for this light
    new_prob_path = prob_path * self.p  # path probability seen by children
    for item in self.childrenPaths:
        a_total_time = item.computeMeanWaitingTimes(a_total_time=a_total_time, prob_path=new_prob_path)
    return a_total_time
def computeTotalMeanWaitingTime(self):
    '''Total expected waiting time: sum of all weighted branch times.'''
    return sum(self.computeMeanWaitingTimes(a_total_time=[]))
path = TrafficLightPath(1, 0, 1, 2, [0, 0])
path.printPath()
print 'Total waiting time ' + str(path.computeTotalMeanWaitingTime()) + ' seconds'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
TwL = pd.Series(index=np.arange(0,80,0.1))
N_WANDS = 1
N_LIGHTS = 2
for i in TwL.index:
path = TrafficLightPath(1, 0, N_WANDS, N_LIGHTS ,[i, np.nan])
TwL.loc[i] = path.computeTotalMeanWaitingTime()
TwL.plot()
plt.hlines(TwL.min(), 0, TwL.index[TwL==TwL.min()][0], linestyles='--')
plt.vlines(TwL.index[TwL==TwL.min()][0], 0, TwL.min(), linestyles='--')
plt.title ('Mean waiting time')
plt.xlabel('Time to wait for using the magic wand [s]')
plt.ylabel('Total mean waiting time')
plt.show()
print 'Optimal waiting time is ' + str(TwL.min()) + ' seconds.'
N_WANDS = 1
N_LIGHTS = 3
[L1vec, L2vec] = np.meshgrid(np.arange(0,80, 1), np.arange(0,80, 1))
Tw = np.zeros(L1vec.shape)
for i in range(Tw.shape[0]):
for j in range(Tw.shape[1]):
path = TrafficLightPath(1,0, N_WANDS, N_LIGHTS, [L1vec[i,j], L2vec[i,j], np.nan])
Tw[i,j] = path.computeTotalMeanWaitingTime()
min_ind = np.unravel_index(Tw.argmin(), Tw.shape)
path = TrafficLightPath(1, 0, N_WANDS, N_LIGHTS,[L1vec[min_ind[0], min_ind[1]], L2vec[min_ind[0], min_ind[1]], np.nan])
if path.computeTotalMeanWaitingTime() == Tw.min():
print 'Min time = ' + str(Tw.min())
print 'L1 = ' + str(L1vec[min_ind[0], min_ind[1]])
print 'L2 = ' + str(L2vec[min_ind[0], min_ind[1]])
plt.imshow(Tw)
plt.hlines(min_ind[0], 0, min_ind[1], linestyles='--')
plt.vlines(min_ind[1], 0, min_ind[0], linestyles='--')
plt.colorbar()
plt.show()
print 'Optimal waiting time is ' + str(Tw.min()) + ' seconds.'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: When asking the professor a question, use the STAR approach
Step2: The wait command forces the system to finish processing the child process before the next command is run.
Step3: Parallel grep (my attempt)
|
<ASSISTANT_TASK:>
Python Code:
# Use this for interactive plots
%matplotlib notebook
import matplotlib.pyplot as plt
import pandas as pd
pd.Series([1,2,3,4]).plot()
!grep Guido data/week1/LICENSE.txt
!cat data/week1/LICENSE.txt data/week1/LICENSE.txt | wc -w
!cat data/week1/LICENSE.txt data/week1/LICENSE.txt | head
%%bash
for term in Python Guido Scala license
do
grep $term data/week1/LICENSE.txt | wc -l
done
%%bash
for ((num=0; num<=5; num++))
do
echo "I have $num cats"
done
%%bash
tail -n 115 data/week1/LICENSE.txt | head | cut -f 1-2 -d " "
%%bash
find /Users/BlueOwl1/Documents -name pdf | paste -s -d : - | cat
!echo "scale=10; 4.32*(3/7)+1.23" | bc
%%bash
for num in {1..10}
do
# For modulo to work, scale must be 0
echo "(1+$num) % 3" | bc
done
%%bash
for num in {1..20}
do
if [ $[$num%15] = 0 ]; then
echo fizzbuzz
elif [ $[$num%3] = 0 ]; then
echo fizz
elif [ $[$num%5] = 0 ]; then
echo buzz
else
echo $num
fi
done
%%bash
seq 15 | paste -sd+ -
%%bash
seq 1000000 | wc &
echo "Finished waiting"
%%bash
seq 1000000 | wc &
wait; echo "Finished waiting"
%%bash
mkdir pgrep_temp_files
cd pgrep_temp_files
split -l 10 -a 5 ../week1/CountOfMonteCristo.txt pgrep_temp_files
for file in pgrep_temp_files*
do
grep "Python" $file &
done
# Return to original directory
cd ..
# Remove file that was created
rm -fr pgrep_temp_files
%%timeit
!grep Python week1/CountOfMonteCristo.txt > /dev/null
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Doing it the classical way, we compile an orchestra and a score file, passing also some flags. Note that the first argument, indicating the program name, is mandatory. This is due to the C origin of Csound. You can give as first argument to cs.compile_ any string such as "dummy" or "myProgram", etc. The usage, calling C programs, is to pass the program name.
Step2: We can do the same with a single input file (.csd file)
Step3: Csound offers also the possibility to compile directly a csd file. As this can be done during a performance to replace or add new instruments and events (more on this later), cs.start() has to be called here, because Csound is not actually running a performance (due to the preceding cs.reset()
Step4: Finally, instead of compiling a csd from an input file, we can do it from a string
Step5: Instead of performing an entire score with a single cs.perform() command, it is possible to run a loop, performing a buffer (ksmps samples long with cs.performKsmps(), or -b frames long with cs.performBuffer()) during each pass. Note that cs.performKsmps() and cs.performBuffer() return False while the score is not finished. Here, a dot is displayed for each pass in the loop
|
<ASSISTANT_TASK:>
Python Code:
import ctcsound
cs = ctcsound.Csound()
ret = cs.compile_("csound", "-o", "dac", "examples/02-a.orc", "examples/02-a.sco")
if ret == ctcsound.CSOUND_SUCCESS:
cs.perform()
cs.reset()
ret = cs.compile_("csound", "examples/02-a.csd")
if ret == ctcsound.CSOUND_SUCCESS:
cs.perform()
cs.reset()
ret = cs.compileCsd("examples/02-a.csd")
if ret == ctcsound.CSOUND_SUCCESS:
cs.start()
cs.perform()
cs.reset()
csd = '''
<CsoundSynthesizer>
<CsOptions>
-d -o dac -m0
</CsOptions>
<CsInstruments>
sr = 48000
ksmps = 100
nchnls = 2
0dbfs = 1
instr 1
idur = p3
iamp = p4
icps = cpspch(p5)
irise = p6
idec = p7
ipan = p8
kenv linen iamp, irise, idur, idec
kenv = kenv*kenv
asig poscil kenv, icps
a1, a2 pan2 asig, ipan
outs a1, a2
endin
</CsInstruments>
<CsScore>
i 1 0 1 0.5 8.06 0.05 0.3 0.5
e 1.5
</CsScore>
</CsoundSynthesizer>
'''
ret = cs.compileCsdText(csd)
if ret == ctcsound.CSOUND_SUCCESS:
cs.start()
cs.perform()
cs.reset()
ret = cs.compile_("csound", "examples/02-a.csd")
if ret == ctcsound.CSOUND_SUCCESS:
while not cs.performKsmps():
print('.', end='')
print()
cs.reset()
ret = cs.compile_("csound", "examples/02-a.csd")
if ret == ctcsound.CSOUND_SUCCESS:
while not cs.performBuffer():
print('.', end='')
print()
cs.reset()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: data download
Step2: get the master edgelist from CL
Step3: download scdb data from SCDB
Step4: network data
Step5: make graph
Step6: NLP data
Step7: make tf-idf matrix
Step8: Load network
Step9: compute snapshots
Step10: add textlength
Step11: update snapshots
|
<ASSISTANT_TASK:>
Python Code:
setup_data_dir(data_dir)
make_subnetwork_directory(data_dir, network_name)
download_op_and_cl_files(data_dir, network_name)
download_master_edgelist(data_dir)
download_scdb(data_dir)
# create the raw case metadata data frame in the raw/ folder
make_subnetwork_raw_case_metadata(data_dir, network_name)
# create clean case metadata and edgelist from raw data
clean_metadata_and_edgelist(data_dir, network_name)
make_graph(subnet_dir, network_name)
%%time
make_network_textfiles(data_dir, network_name)
%%time
make_tf_idf(text_dir, subnet_dir + 'nlp/')
# load the graph
G = ig.Graph.Read_GraphML(subnet_dir + network_name +'_network.graphml')
G.summary()
vertex_metrics = ['indegree', 'outdegree', 'degree',
'd_pagerank','u_pagerank',
'authorities', 'hubs',
'd_eigen', 'u_eigen']
#'d_betweenness', 'u_betweenness',
#'d_closeness', 'u_closeness']
# add recent citations
vertex_metrics += ['recentcite_' + str(t) for t in np.arange(1, 10 + 1)]
vertex_metrics += ['recentcite_' + str(t) for t in [15, 20, 25, 30, 35, 40]]
active_years = range(1900, 2015 + 1)
%%time
make_snapshot_vertex_metrics(G, active_years, vertex_metrics, subnet_dir)
G.vs['num_words'] = [0] * len(G.vs)
for op_id in G.vs['name']:
text = open(text_dir + op_id +'.txt', 'r').read()
num_words = len(text.split())
G.vs.find(name=op_id)['num_words'] = num_words
G.write_graphml(subnet_dir + network_name +'_network.graphml')
to_add = ['rev_pagerank', 'num_words']
to_add += ['citerank_' + str(t) for t in [1, 2, 5, 10, 20, 50]]
to_add = ['polyrank_' + str(t) for t in [1, 2, 5, 10, 20, 50]]
%%time
update_snapshot_vertex_metrics(G, active_years, to_add, subnet_dir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Building a Custom Widget
Step2: sync=True traitlets
Step3: Define the view
Step4: Render method
Step5: Test
Step6: Making the widget stateful
Step7: Accessing the model from the view
Step8: Dynamic updates
Step9: Test
Step10: Finishing
Step11: Updating the Javascript code
Step12: Getting and setting the value
Step13: Test
Step14: Trying to use the spinner with another widget.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function # For py 2.7 compat
from IPython.html import widgets
from IPython.utils.traitlets import Unicode
class HelloWidget(widgets.DOMWidget):
    # Name of the Backbone view (registered on the JS side) that renders
    # this widget; sync=True mirrors the traitlet to the front end.
    _view_name = Unicode('HelloView', sync=True)
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
});
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
// Define the HelloView
var HelloView = widget.DOMWidgetView.extend({
});
// Register the HelloView with the widget manager.
manager.WidgetManager.register_widget_view('HelloView', HelloView);
});
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var HelloView = widget.DOMWidgetView.extend({
// Render the view.
render: function(){
this.$el.text('Hello World!');
},
});
manager.WidgetManager.register_widget_view('HelloView', HelloView);
});
HelloWidget()
class HelloWidget(widgets.DOMWidget):
    # JS view class used to render this widget in the notebook.
    _view_name = Unicode('HelloView', sync=True)
    # Widget state displayed by the view; synced both ways with the
    # front end, so Python assignments update the rendered text.
    value = Unicode('Hello World!', sync=True)
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var HelloView = widget.DOMWidgetView.extend({
render: function(){
this.$el.text(this.model.get('value'));
},
});
manager.WidgetManager.register_widget_view('HelloView', HelloView);
});
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var HelloView = widget.DOMWidgetView.extend({
render: function(){
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
this.$el.text(this.model.get('value'));
},
});
manager.WidgetManager.register_widget_view('HelloView', HelloView);
});
w = HelloWidget()
w
w.value = 'test'
from IPython.utils.traitlets import CInt
class SpinnerWidget(widgets.DOMWidget):
    # JS view (a jQuery UI spinner) that renders this widget.
    _view_name = Unicode('SpinnerView', sync=True)
    # Current spinner value; CInt coerces assignments to int and the
    # value is synced with the front end in both directions.
    value = CInt(0, sync=True)
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var SpinnerView = widget.DOMWidgetView.extend({
render: function(){
// jQuery code to create a spinner and append it to $el
this.$input = $('<input />');
this.$el.append(this.$input);
this.$spinner = this.$input.spinner({
change: function( event, ui ) {}
});
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
},
});
manager.WidgetManager.register_widget_view('SpinnerView', SpinnerView);
});
%%javascript
require(["widgets/js/widget", "widgets/js/manager"], function(widget, manager){
var SpinnerView = widget.DOMWidgetView.extend({
render: function(){
var that = this;
this.$input = $('<input />');
this.$el.append(this.$input);
this.$spinner = this.$input.spinner({
change: function( event, ui ) {
that.handle_spin();
},
spin: function( event, ui ) {
that.handle_spin();
}
});
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
this.$spinner.spinner('value', this.model.get('value'));
},
handle_spin: function() {
this.model.set('value', this.$spinner.spinner('value'));
this.touch();
},
});
manager.WidgetManager.register_widget_view('SpinnerView', SpinnerView);
});
w = SpinnerWidget(value=5)
w
w.value
w.value = 20
from IPython.display import display
w1 = SpinnerWidget(value=0)
w2 = widgets.IntSlider()
display(w1,w2)
from IPython.utils.traitlets import link
mylink = link((w1, 'value'), (w2, 'value'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the image objects. Input arguments
Step2: Define the instrument. Input arguments
Step3: Define the algorithm being used. Input arguments
Step4: Run it
Step5: Print the Zernikes Zn (n>=4)
Step6: plot the Zernikes Zn (n>=4)
Step7: Patrick asked the question
Step8: Now we do the forward raytrace using our wavefront solutions
Step9: We now trace the rays to the image plane. Lutxp and Lutyp are image coordinates for each (oversampled) ray. showProjection() makes the intensity image. Then, to down sample the image back to original resolution, we want to use the function downResolution() which is defined for the image class.
Step10: Now do the same thing for extra focal image
|
<ASSISTANT_TASK:>
Python Code:
from lsst.cwfs.instrument import Instrument
from lsst.cwfs.algorithm import Algorithm
from lsst.cwfs.image import Image, readFile, aperture2image, showProjection
import lsst.cwfs.plots as plots
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
fieldXY = [0,0]
I1 = Image(readFile('../tests/testImages/AuxTel2001/1579925613-16Pup_intra-0-1.fits'), fieldXY, Image.INTRA)
I2 = Image(readFile('../tests/testImages/AuxTel2001/1579925662-16Pup_extra-0-1.fits'), fieldXY, Image.EXTRA)
I1p = Image(readFile('../tests/testImages/AuxTel2001/1579925833-16Pup_intra-0-1.fits'), fieldXY, Image.INTRA)
I2p = Image(readFile('../tests/testImages/AuxTel2001/1579925882-16Pup_extra-0-1.fits'), fieldXY, Image.EXTRA)
plots.plotImage(I1.image,'intra')
plots.plotImage(I2.image,'extra')
plots.plotImage(I1p.image,'intra')
plots.plotImage(I2p.image,'extra')
inst=Instrument('AuxTel',I1.sizeinPix)
print("Expected image diameter in pixels = %.0f"%(inst.offset/inst.fno/inst.pixelSize))
I1.image = I1.image[300-80:300+80,400-80:400+80]
I1.sizeinPix = I1.image.shape[0]
I2.image = I2.image[300-80:300+80,400-80:400+80]
I2.sizeinPix = I2.image.shape[0]
I1p.image = I1p.image[350-80:350+80,400-80:400+80]
I1p.sizeinPix = I1p.image.shape[0]
I2p.image = I2p.image[350-80:350+80,400-80:400+80]
I2p.sizeinPix = I2p.image.shape[0]
inst=Instrument('AuxTel',I1.sizeinPix)
plots.plotImage(I1.image,'intra')
plots.plotImage(I2.image,'extra')
plots.plotImage(I1p.image,'intra')
plots.plotImage(I2p.image,'extra')
algo=Algorithm('exp',inst,0)
algop=Algorithm('exp',inst,0)
algo.runIt(inst,I1,I2,'paraxial')
algop.runIt(inst,I1p,I2p,'paraxial')
print(algo.zer4UpNm)
print(algop.zer4UpNm)
plt.plot(range(4,23), algo.zer4UpNm,'b.-',label = '1')
plt.plot(range(4,23), algop.zer4UpNm,'r.-',label = '2')
plt.legend()
plt.grid()
#the Zernike do look a bit different,
#but,
#the images above (especially the intra focal images do look kind of different?)
plots.plotImage(I1.image0,'original intra',mask=I1.cMask)
plots.plotImage(I2.image0,'original extra', mask=I2.cMask)
nanMask = np.ones(I1.image.shape)
nanMask[I1.pMask==0] = np.nan
fig, ax = plt.subplots(1,2, figsize=[10,4])
img = ax[0].imshow(algo.Wconverge*nanMask, origin='lower')
ax[0].set_title('Final WF = estimated + residual')
fig.colorbar(img, ax=ax[0])
img = ax[1].imshow(algo.West*nanMask, origin='lower')
ax[1].set_title('residual wavefront')
fig.colorbar(img, ax=ax[1])
fig, ax = plt.subplots(1,2, figsize=[10,4])
img = ax[0].imshow(I1.image, origin='lower')
ax[0].set_title('Intra residual image')
fig.colorbar(img, ax=ax[0])
img = ax[1].imshow(I2.image, origin='lower')
ax[1].set_title('Extra residual image')
fig.colorbar(img, ax=ax[1])
oversample = 10
projSamples = I1.image0.shape[0]*oversample
luty, lutx = np.mgrid[
-(projSamples / 2 - 0.5):(projSamples / 2 + 0.5),
-(projSamples / 2 - 0.5):(projSamples / 2 + 0.5)]
lutx = lutx / (projSamples / 2 / inst.sensorFactor)
luty = luty / (projSamples / 2 / inst.sensorFactor)
lutxp, lutyp, J = aperture2image(I1, inst, algo, algo.converge[:,-1], lutx, luty, projSamples, 'paraxial')
show_lutxyp = showProjection(lutxp, lutyp, inst.sensorFactor, projSamples, 1)
I1fit = Image(show_lutxyp, fieldXY, Image.INTRA)
I1fit.downResolution(oversample, I1.image0.shape[0], I1.image0.shape[1])
luty, lutx = np.mgrid[
-(projSamples / 2 - 0.5):(projSamples / 2 + 0.5),
-(projSamples / 2 - 0.5):(projSamples / 2 + 0.5)]
lutx = lutx / (projSamples / 2 / inst.sensorFactor)
luty = luty / (projSamples / 2 / inst.sensorFactor)
lutxp, lutyp, J = aperture2image(I2, inst, algo, algo.converge[:,-1], lutx, luty, projSamples, 'paraxial')
show_lutxyp = showProjection(lutxp, lutyp, inst.sensorFactor, projSamples, 1)
I2fit = Image(show_lutxyp, fieldXY, Image.EXTRA)
I2fit.downResolution(oversample, I2.image0.shape[0], I2.image0.shape[1])
#The atmosphere used here is just a random Gaussian smearing. We do not care much about the size at this point
from scipy.ndimage import gaussian_filter
atmSigma = .6/3600/180*3.14159*21.6/1.44e-5
I1fit.image[np.isnan(I1fit.image)]=0
a = gaussian_filter(I1fit.image, sigma=atmSigma)
fig, ax = plt.subplots(1,3, figsize=[15,4])
img = ax[0].imshow(I1fit.image, origin='lower')
ax[0].set_title('Forward prediction (no atm) Intra')
fig.colorbar(img, ax=ax[0])
img = ax[1].imshow(a, origin='lower')
ax[1].set_title('Forward prediction (w atm) Intra')
fig.colorbar(img, ax=ax[1])
img = ax[2].imshow(I1.image0, origin='lower')
ax[2].set_title('Real Image, Intra')
fig.colorbar(img, ax=ax[2])
I2fit.image[np.isnan(I2fit.image)]=0
b = gaussian_filter(I2fit.image, sigma=atmSigma)
fig, ax = plt.subplots(1,3, figsize=[15,4])
img = ax[0].imshow(I2fit.image, origin='lower')
ax[0].set_title('Forward prediction (no atm) Extra')
fig.colorbar(img, ax=ax[0])
img = ax[1].imshow(b, origin='lower')
ax[1].set_title('Forward prediction (w atm) Extra')
fig.colorbar(img, ax=ax[1])
img = ax[2].imshow(I2.image0, origin='lower')
ax[2].set_title('Real Image, Extra')
fig.colorbar(img, ax=ax[2])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lets set our seeds for the environment and pull in our data.
Step2: Now lets take a look at the data we are given.
Step3: As we can see we have 10 columns of features, 2 of those are of type object, with no null values in the data. Next we need to look at what type of data is in the two columns with the type object.
Step4: At first glance it looks like the column "sales" is holding nominal data and the column "salary" is holding ordinal data. Lets see how many unique values we have in each column.
Step5: So the nominal data has 10 categories and the ordinal data has 3 categories. Now we need to convert this data into something that our neural network can work with. The way we are going to handle converting this data is by breaking these categories down into their own binary features.
Step6: Now that we have broken down the categories lets get rid of our original sales and salary columns.
Step7: Since the feature we are wanting to predict is whether the employee has left or not, we should look at the percentages of left versus stayed in our data set.
Step8: So we have about ~76% of people who stayed and ~24% of people who left. When we split our data into the training and test sets we want to try and maintain these percentages in our new distributions. Lets create a function that will do this for us.
Step9: Now we have our stratified samples. However, just to make sure, we can look at our training set percentages.
Step10: It seems our sample size's percentages match perfectly. So now lets split out our training set into the data and the data labels. Also lets grab the number of features we are working with.
Step11: Now lets start defining our model. The model we will create will have 2 hidden layers and an output layer. Lets start with defining the inputs and labels.
Step12: Now let's create our first hidden layer. Typically your hidden layers will use a Rectified Linear Unit (ReLU) activation function in these cases, but in our case we will use an Exponential Linear Unit (ELU) activation function for its nice properties of reducing the bias-shift effect on our network (giving faster learning than ReLU) and for the fact that it acts like batch normalization without the computational complexity. We will also add the caveat of initializing our weights and biases with a standard deviation of 0.01.
Step13: Our second hidden layer will be the same but adjusted for the new input shape.
Step14: And for our final output layer we will use the sigmoid function for our activation function because we want our output to be between 0 and 1.
Step15: Next we will define our cost function, which will be the cross entropy loss function.
Step16: For our optimizer we will use Adam with a learning rate of 1e-3.
Step17: Lets do some tensorflow setup and define an array to store our loss function values when we train our model.
Step18: I have played around with the values and have found decent numbers to use for our epoch and batch size in our model. We will use 600 epochs and a batch size of 50.
Step19: Now lets set up our training and then print out our final cost value.
Step20: Here is what the loss function looks like graphically.
Step21: Now lets grab the predictions of our training set data.
Step22: Lets define a function that will give us a confusion matrix of our data and show us the F1 Score and the Total Accuracy.
Step23: Now that we have defined our function to print out the metrics that we want to see, lets test out two different thresholds for analyzing our data. We will first use the default 70% threshold that we had set in our function and then we will try a much smaller threshold, like 33%.
Step24: As we can see, just using the threshold around 70% we get decent scores with Precision being ~89%, Recall being ~97%, F1 Score around ~93%, and Total Accuracy around ~97%. However, if we bump down the threshold to 33% we have our Precision and Recall values closer to the same value and our F1 Score and Total Accuracy dropped but only around ~0.3%. So it seems that we should use 33% as a threshold value but we will see if this is a good idea by testing with our test set.
Step25: So it looks like, even with our test set, the 33% is a better threshold to use, to keep our Precision and Recall values close together. Now the last thing to check is to compare our cost values for the training set and test set to make sure we are not overfitting or underfitting the data.
|
<ASSISTANT_TASK:>
Python Code:
## Using magic commands for set up and showing working versions
%matplotlib inline
%load_ext version_information
%version_information tensorflow, numpy, pandas, matplotlib
import tensorflow as tf
import pandas as pd
import numpy as np
np.random.seed(7)
tf.set_random_seed(7)
init_data = pd.read_csv("./HR_comma_sep.csv")
print(init_data.info())
print("Sales: {0}".format(init_data["sales"][:5]))
print("Salary: {0}".format(init_data["salary"][:5]))
sales_unique_n = init_data["sales"].nunique()
salary_unique_n = init_data["salary"].nunique()
print("Unique sale categories: {0}".format(sales_unique_n))
print("Unique salary categories: {0}".format(salary_unique_n))
sales_unique_feature_names = init_data["sales"].unique()
salary_unique_feature_names = init_data["salary"].unique()
# Function to breakdown a category into individual binary features
def break_down_features(feature_list, category, orig_data):
    """One-hot encode ``category`` into binary indicator columns.

    For every name in ``feature_list`` a new column
    ``"<category>_<name>"`` is added to ``orig_data`` (mutated in
    place), holding 1 where the row's ``category`` value equals that
    name and 0 elsewhere.
    """
    for name in feature_list:
        # Iterate the values directly; the old enumerate() produced an
        # index that was immediately discarded.
        orig_data[category + "_" + name] = [
            1 if value == name else 0 for value in orig_data[category]
        ]
break_down_features(sales_unique_feature_names, "sales", init_data)
break_down_features(salary_unique_feature_names, "salary", init_data)
init_data = init_data.drop(["sales", "salary"], axis=1)
print(init_data["left"].value_counts() / len(init_data["left"]))
def stratified_split_data(data, ratio):
    """Split ``data`` into train/test sets, preserving the 'left' ratio.

    Rows are partitioned by the binary 'left' column, each class is
    shuffled, and ``ratio`` of every class goes to the test set so both
    splits keep the original stayed/left proportions.  Each returned
    frame is shuffled again so the classes are interleaved.
    Returns ``(train_set, test_set)``.
    """
    stayed = data.loc[data["left"] == 0]
    quit_rows = data.loc[data["left"] == 1]
    # Shuffle each class independently.
    stayed = stayed.iloc[np.random.permutation(len(stayed))]
    quit_rows = quit_rows.iloc[np.random.permutation(len(quit_rows))]
    n_test_stayed = int(len(stayed) * ratio)
    n_test_quit = int(len(quit_rows) * ratio)
    # The first n_test_* shuffled rows of each class form the test set,
    # the remainder form the training set.
    train_set = pd.concat(
        [stayed[n_test_stayed:], quit_rows[n_test_quit:]], ignore_index=True
    )
    test_set = pd.concat(
        [stayed[:n_test_stayed], quit_rows[:n_test_quit]], ignore_index=True
    )
    # Final shuffle of the concatenated frames.
    train_order = np.random.permutation(len(train_set))
    test_order = np.random.permutation(len(test_set))
    return train_set.iloc[train_order], test_set.iloc[test_order]
train_set, test_set = stratified_split_data(init_data, 0.2)
print(train_set["left"].value_counts() / len(train_set["left"]))
data = (train_set.drop("left", axis=1)).values
data_labels = train_set["left"].values
data_labels = data_labels.reshape([len(data_labels), 1])
num_features = data.shape[1]
X_init = tf.placeholder(tf.float32, [None, num_features])
Y_init = tf.placeholder(tf.float32, [None, 1])
w_1 = tf.Variable(tf.truncated_normal([num_features, 10], stddev=0.01))
b_1 = tf.Variable(tf.truncated_normal([10], stddev=0.01))
layer_1 = tf.nn.elu(tf.add(tf.matmul(X_init, w_1), b_1))
w_2 = tf.Variable(tf.truncated_normal([10, 8], stddev=0.01))
b_2 = tf.Variable(tf.truncated_normal([8], stddev=0.01))
layer_2 = tf.nn.elu(tf.add(tf.matmul(layer_1, w_2), b_2))
w_3 = tf.Variable(tf.truncated_normal([8, 1], stddev=0.01))
b_3 = tf.Variable(tf.truncated_normal([1], stddev=0.01))
output_layer = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, w_3), b_3))
cost = -tf.reduce_mean(tf.multiply(Y_init, tf.log(output_layer)) + (1 - Y_init)*tf.log(1 - output_layer) )
optimizer = tf.train.AdamOptimizer(1e-3).minimize(cost)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
loss_values = []
num_epochs = 600
batch_size = 50
count = len(data) # helper variable for our mini-batch training
for epoch in range(num_epochs):
start_n = 0
c = None
while start_n < count:
sess.run(optimizer, feed_dict={X_init:data[start_n:(start_n + batch_size)], Y_init:data_labels[start_n:(start_n + batch_size)]})
start_n += batch_size
c = sess.run(cost, feed_dict={X_init:data, Y_init:data_labels})
loss_values.append(c)
print("Final cost = {0}".format(sess.run(cost, feed_dict={X_init:data, Y_init:data_labels})) )
import matplotlib.pyplot as plt
plt.plot(loss_values);
predictions = sess.run(output_layer, feed_dict={X_init:data})
def confusion_matrix(pred_data, act_data, threshold=0.7):
    """Print a confusion matrix plus precision, recall, F1 and accuracy.

    pred_data -- sequence of single-element rows holding the predicted
                 probability that the employee left.
    act_data  -- sequence of single-element rows holding the true label
                 (1 = left, 0 = stayed).
    threshold -- probability at or above which a prediction counts as
                 "left".
    """
    stayed_true = 0   # predicted stayed, actually stayed (TN)
    stayed_false = 0  # predicted left,   actually stayed (FP)
    left_true = 0     # predicted left,   actually left   (TP)
    left_false = 0    # predicted stayed, actually left   (FN)
    for pred_row, act_row in zip(pred_data, act_data):
        predicted_left = pred_row[0] >= threshold
        actually_left = act_row[0] == 1
        if predicted_left and actually_left:
            left_true += 1
        elif not predicted_left and actually_left:
            left_false += 1
        elif predicted_left and not actually_left:
            stayed_false += 1
        else:
            stayed_true += 1
    # Bug fix: the original swapped the two formulas -- precision is
    # TP/(TP+FP) and recall is TP/(TP+FN).  Denominators are floored at
    # 1e-5 so an empty class does not divide by zero.
    precision = left_true / max(1e-5, left_true + stayed_false)
    recall = left_true / max(1e-5, left_true + left_false)
    # Bug fix: the original raised ZeroDivisionError when precision and
    # recall were both 0; define F1 as 0 in that degenerate case.
    if precision + recall == 0:
        f1_score = 0.0
    else:
        f1_score = 2 * ((precision * recall) / (precision + recall))
    print("Stayed True: {0}\nStayed False: {1}\nLeft True: {2}\nLeft False: {3}".format(stayed_true, stayed_false, left_true, left_false))
    print("Precision = {0}".format(precision))
    print("Recall = {0}".format(recall))
    print("F1 score = {0}".format(f1_score))
    print("Total Accuracy = {0}".format((stayed_true + left_true) / (len(pred_data))))
confusion_matrix(predictions, data_labels)
confusion_matrix(predictions, data_labels, 0.33)
test_data = (test_set.drop("left", axis=1)).values
test_data_labels = test_set["left"].values
test_data_labels = test_data_labels.reshape([len(test_data_labels), 1])
test_predictions = sess.run(output_layer, feed_dict={X_init:test_data})
confusion_matrix(test_predictions, test_data_labels, 0.33)
confusion_matrix(test_predictions, test_data_labels)
print("Cost for train data: {0}".format(sess.run(cost, feed_dict={X_init:data, Y_init:data_labels})) )
print("Cost for test data: {0}".format(sess.run(cost, feed_dict={X_init:test_data, Y_init:test_data_labels}) ) )
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare and shape the data
Step2: Build the recommendation model
Step3: Test the model
Step4: This doesn't give us that good of a representation of ranking because the ranks are the number of purchases. Something better may be to look at some actual recommendations.
Step5: <img src='https
|
<ASSISTANT_TASK:>
Python Code:
#Put the csv into an RDD (at first, each row in the RDD is a string which
#correlates to a line in the csv
retailData = sc.textFile("OnlineRetail.csv")
print retailData.take(2)
from pyspark.mllib.recommendation import ALS, Rating
import re
#Remove the header from the RDD
header = retailData.first()
retailData = retailData.filter(lambda line: line != header)
#To produce the ALS model, we need to train it with each individual
#purchase. Each record in the RDD must be the customer id,
#item id, and the rating. In this case, the rating is the quantity
#ordered. MLlib converts these into a sparce, unfactored matrix.
retailData = retailData.map(lambda l: l.split(",")).\
filter(lambda l: int(l[3]) > 0 and len(re.sub("\D", "", l[1])) != 0 and len(l[6]) != 0).\
map(lambda l: (int(l[6]),int(re.sub("\D", "", l[1])),int(l[3])))
#Randomly split the data into a testing set and a training set
testRDD, trainRDD = retailData.randomSplit([.2,.8])
trainData = trainRDD.map(lambda l: Rating(l[0],l[1],l[2]))
print trainData.take(2)
print
print testRDD.take(2)
#Use trainging RDD to train a model with Alternating Least Squares
#rank=5
#5 columns in the user-feature and product-feature matricies
#iterations=10
#10 factorization runs
rank = 5
numIterations = 10
model = ALS.train(trainData, rank, numIterations)
print "The model has been trained"
#Evaluate the model with the test rdd by using the predictAll function
predict = model.predictAll(testRDD.map(lambda l: (l[0],l[1])))
#Calculate and print the Mean Squared Error
predictions = predict.map(lambda l: ((l[0],l[1]), l[2]))
ratingsAndPredictions = testRDD.map(lambda l: ((l[0], l[1]), l[2])).join(predictions)
ratingsAndPredictions.cache()
print ratingsAndPredictions.take(3)
meanSquaredError = ratingsAndPredictions.map(lambda l: (l[1][0] - l[1][1])**2).mean()
print
print 'Mean squared error = %.4f' % meanSquaredError
recs = model.recommendProducts(15544,5)
for rec in recs:
print rec
#Rating(user=15544, product=84568, rating=193.03195106065823)
#GIRLS ALPHABET IRON ON PATCHES
#Rating(user=15544, product=16033, rating=179.45915040198466)
#MINI HIGHLIGHTER PENS
#Rating(user=15544, product=22266, rating=161.04293255928698)
#EASTER DECORATION HANGING BUNNY
#Rating(user=15544, product=84598, rating=141.00162368678377)
#BOYS ALPHABET IRON ON PATCHES
#Rating(user=15544, product=72803, rating=129.54033486738518)
#ROSE SCENT CANDLE JEWELLED DRAWER
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: First define the projection operator for a state at angle $\theta$
Step3: Create the projection operators for each of the angles, two for Alice, two for Bob
Step4: Create the state $\big|\psi\big\rangle = \sqrt{0.2} \big|H,H\big\rangle + \sqrt{0.8} \big|V,V\big\rangle$
Step5: Now, find the joint probability that Alice measures A1 and Bob measures B1. We do this by finding the expectation value of the projection operator for the joint state $\big|\theta_{A1},\theta_{B1}\big\rangle$. This is formed as the tensor product of the two appropriate projection operators. In these tensor products, be sure to put Alice's operator first, then Bob's (just like we did for the signal and idler photons). Each operator acts on the photon corresponding to the order in the tensor() function.
Step6: Find the conditional probability $P(\theta_{B2}|\theta_{A1}) = \frac{P(\theta_{B2},\theta_{A1})}{P(\theta_{A1})}$
Step7: Find the conditional probability $P(\theta_{A2}|\theta_{B1}) = \frac{P(\theta_{A2},\theta_{B1})}{P(\theta_{B1})}$
Step8: This is what we described in class.
Step9: This is harder to interpret, but we clearly have different probabilities. Finally, check if we had used a mixed state
Step10: We see that $P(\theta_{B2},\theta_{A2}) > P(\theta_{B1},\theta_{A1})$ as we said in class for a state that obeys realism.
Step11: The calculations are actually the same in QuTiP, the expect function takes either a ket state or a density matrix.
Step12: These all agree (as they should).
Step13: Make a list of the probability of joint measurements for a pair of angles
Step14: We see that the joint probabilities have a zero at 35˚. Now plug that in to one of the conditional probabilities and see what angle for the conditional probability gives 1
Step15: So only 19 and 35 work. Now, can you derive 19 and 35 given only the state $|\psi\rangle$? Try the first plot, i.e. calculate the joint probability $P(\theta_A,\theta_B)$
Step16: Challenge
|
<ASSISTANT_TASK:>
Python Code:
from numpy import sin,cos,pi,sqrt,angle,exp,deg2rad,arange,rad2deg
import matplotlib.pyplot as plt
from qutip import *
%matplotlib inline
H = Qobj([[1],[0]])
V = Qobj([[0],[1]])
def P(theta):
    """Return the projection operator |theta><theta| for a linear
    polarization state at angle *theta* (radians).

    The state is cos(theta)|H> + sin(theta)|V>, built from the
    module-level kets H and V.
    """
    # NOTE: the original docstring lost its triple quotes during export,
    # leaving a bare-word line that is a syntax error; restored here.
    theta_ket = cos(theta)*H + sin(theta)*V
    return theta_ket*theta_ket.dag()
Pa1 = P(deg2rad(19))
Pa2 = P(deg2rad(-35))
Pb1 = P(deg2rad(-19))
Pb2 = P(deg2rad(35))
psi=sqrt(0.2)*tensor(H,H) + sqrt(0.8)*tensor(V,V)
P1 = expect(tensor(Pa1,Pb1),psi) # joint for A1, B1 (expect 0.09)
P2 = psi.dag()*tensor(Pa1,Pb1)*psi
P1 == P2.data[0,0] # The only difference is that we have to pull out the value
# from the Qobj using the .data[0,0] method so we can compare it to result from `expect`
P1
# B2 conditioned on A1 (expect 1)
Prob_b2_a1 = expect(tensor(Pa1,Pb2),psi)
#(psi.dag()*tensor(Pa1,Pb2)*psi).data[0,0] # the joint probability
Prob_a1 = expect(tensor(Pa1,qeye(2)),psi)
#(psi.dag()*tensor(Pa1,qeye(2))*psi).data[0,0] # the singular probability
Prob_b2a1 = Prob_b2_a1 / Prob_a1 # the conditional probability
Prob_b2a1
# A2 conditioned on B1 (expect 1)
# can do it all on one line:
expect(tensor(Pa2,Pb1),psi) / expect(tensor(qeye(2),Pb1),psi)
expect(tensor(Pa2,Pb2),psi) # joint for A2, B2 (classically expect 0.09, QM says 0)
psi2=tensor(H,H)
expect(tensor(Pa1,Pb1),psi2) # joint for A1, B1 (expect 0.09)
# B2 conditioned on A1:
expect(tensor(Pa1,Pb2),psi2) / expect(tensor(Pa1,qeye(2)),psi2)
# A2 conditioned on B1
expect(tensor(Pa2,Pb1),psi2) / expect(tensor(qeye(2),Pb1),psi2)
# joint for A2, B2
expect(tensor(Pa2,Pb2),psi2)
rho_mix = 0.2 * ket2dm(tensor(H,H)) + 0.8 * ket2dm(tensor(V,V))
rho_mix
# joint for A1, B1
expect(tensor(Pa1,Pb1),rho_mix)
# B2 conditioned on A1
expect(tensor(Pa1,Pb2),rho_mix) / expect(tensor(Pa1,qeye(2)),rho_mix)
# A2 conditioned on B1
expect(tensor(Pa2,Pb1),rho_mix) / expect(tensor(Pb1,qeye(2)),rho_mix)
# joint for A2, B2:
expect(tensor(Pa2,Pb2),rho_mix)
rho_pure = ket2dm(psi) # convert from a ket to a density matrix (dm)
rho_pure
# joint for A1, B1
expect(tensor(Pa1,Pb1),rho_pure)
# B2 conditioned on A1
expect(tensor(Pa1,Pb2),rho_pure) / expect(tensor(Pa1,qeye(2)),rho_pure)
# A2 conditioned on B1
expect(tensor(Pa2,Pb1),rho_pure) / expect(tensor(Pb1,qeye(2)),rho_pure)
# joint for A2, B2:
expect(tensor(Pa2,Pb2),rho_pure)
psi=sqrt(0.2)*tensor(H,H) + sqrt(0.8)*tensor(V,V)
angles = arange(1,90,1)
rads = deg2rad(angles)
out = []
for r in rads:
out.append(expect(tensor(P(-r),P(r)),psi))
plt.plot(angles,out,".") # plot in units of pi
out = []
for r in rads:
out.append(expect(tensor(P(r),P(deg2rad(35))),psi) / expect(tensor(P(r),qeye(2)),psi))
plt.plot(angles,out,".")
# Solution:
# For the first plot, we can show the joint probability for two angles is given by:
plt.plot(rad2deg(rads),(sqrt(0.2)*cos(-rads)*cos(rads) + sqrt(0.8)*sin(-rads)*sin(rads))**2)
# Solution
psi3=sqrt(0.8)*tensor(H,H) + sqrt(0.2)*tensor(V,V)
out = []
for r in rads:
out.append(expect(tensor(P(-r),P(r)),psi3))
plt.plot(angles,out,".") # plot in units of pi
# Solution
out = []
for r in rads:
out.append(expect(tensor(P(r),P(deg2rad(55))),psi3) / expect(tensor(P(r),qeye(2)),psi3))
plt.plot(angles,out,".")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3> Simulate some time-series data </h3>
Step2: <h3> Train model locally </h3>
Step3: <h3> Cloud ML Engine </h3>
Step4: Monitor training with TensorBoard
|
<ASSISTANT_TASK:>
Python Code:
# Change these to try this notebook out
BUCKET = "cloud-training-demos-ml"
PROJECT = "cloud-training-demos"
REGION = "us-central1"
SEQ_LEN = 50
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['SEQ_LEN'] = str(SEQ_LEN)
os.environ['TFVERSION'] = "1.13"
import tensorflow as tf
print(tf.__version__)
import numpy as np
import seaborn as sns
def create_time_series():
    """Return one synthetic noisy sine-wave sequence of length SEQ_LEN."""
    freq = (np.random.random()*0.5) + 0.1  # 0.1 to 0.6
    ampl = np.random.random() + 0.5  # 0.5 to 1.5
    # Each noise sample is uniform in [0, 0.3) -- note: NOT symmetric
    # around zero, despite what the original comment claimed.
    noise = [np.random.random()*0.3 for i in range(SEQ_LEN)]
    x = np.sin(np.arange(0,SEQ_LEN) * freq) * ampl + noise
    return x
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
for i in range(0, 5):
sns.tsplot( create_time_series(), color=flatui[i%len(flatui)] ); # 5 series
def to_csv(filename, N):
    """Write N synthetic sine-wave sequences to *filename*, one CSV row each."""
    with open(filename, 'w') as out:
        for _ in range(N):
            values = create_time_series()
            out.write(",".join(str(v) for v in values))
            out.write('\n')
import os
try:
os.makedirs("data/sines/")
except OSError:
pass
np.random.seed(1) # makes data generation reproducible
to_csv("data/sines/train-1.csv", 1000) # 1000 sequences
to_csv("data/sines/valid-1.csv", 250)
!head -5 data/sines/*-1.csv
%%bash
DATADIR=$(pwd)/data/sines
OUTDIR=$(pwd)/trained/sines
rm -rf $OUTDIR
gcloud ml-engine local train \
--module-name=sinemodel.task \
--package-path=${PWD}/sinemodel \
-- \
--train_data_path="${DATADIR}/train-1.csv" \
--eval_data_path="${DATADIR}/valid-1.csv" \
--output_dir=${OUTDIR} \
--model=rnn2 --train_steps=10 --sequence_length=$SEQ_LEN
import shutil
shutil.rmtree(path = "data/sines", ignore_errors = True)
os.makedirs("data/sines/")
np.random.seed(1) # makes data generation reproducible
for i in range(0,10):
to_csv("data/sines/train-{}.csv".format(i), 1000) # 1000 sequences
to_csv("data/sines/valid-{}.csv".format(i), 250)
%%bash
gsutil -m rm -rf gs://${BUCKET}/sines/*
gsutil -m cp data/sines/*.csv gs://${BUCKET}/sines
%%bash
for MODEL in linear dnn cnn rnn rnn2; do
OUTDIR=gs://${BUCKET}/sinewaves/${MODEL}
JOBNAME=sines_${MODEL}_$(date -u +%y%m%d_%H%M%S)
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=sinemodel.task \
--package-path=${PWD}/sinemodel \
--job-dir=$OUTDIR \
--scale-tier=BASIC \
--runtime-version=$TFVERSION \
-- \
--train_data_path="gs://${BUCKET}/sines/train*.csv" \
--eval_data_path="gs://${BUCKET}/sines/valid*.csv" \
--output_dir=$OUTDIR \
--train_steps=3000 --sequence_length=$SEQ_LEN --model=$MODEL
done
from google.datalab.ml import TensorBoard
TensorBoard().start("gs://{}/sinewaves".format(BUCKET))
for pid in TensorBoard.list()["pid"]:
TensorBoard().stop(pid)
print("Stopped TensorBoard with pid {}".format(pid))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Nodes will be Author objects, each of which holds a list of Commit objects.
Step2: We create a list of authors, also separately keeping track of committer names to make sure we only add each author once. If a commit by an already stored author is found, we add it to that authors list of commits.
Step3: We create our graph by forming an edge whenever an author has a commit which is the parent of another author's commit, and only increasing the weight of that edge if an edge between those two authors already exists.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from bigbang.git_repo import GitRepo;
from bigbang import repo_loader;
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
repos = repo_loader.get_org_repos("codeforamerica")
repo = repo_loader.get_multi_repo(repos=repos)
full_info = repo.commit_data;
class Commit:
    """A single git commit: its message, hexsha, and parent hexshas."""

    def __init__(self, message, hexsha, parents):
        self.message = message
        self.hexsha = hexsha
        self.parents = parents

    def __repr__(self):
        # Abbreviate to the first four space-separated words of the message.
        words = self.message.split(' ')
        return ' '.join(words[:4])
class Author:
    """A committer and the list of commits attributed to them."""

    def __init__(self, name, commits):
        self.name = name
        self.commits = commits
        # Count the commits actually supplied instead of assuming exactly
        # one, so constructing an Author from a pre-built list stays
        # consistent with add_commit bookkeeping. (The call site in this
        # file always passes a one-element list, so behavior is unchanged
        # there.)
        self.number_of_commits = len(commits)

    def add_commit(self, commit):
        """Append *commit* and keep the running count in sync."""
        self.commits.append(commit)
        self.number_of_commits += 1

    def __repr__(self):
        return self.name
def get_authors():
    """Group the rows of the module-level ``full_info`` DataFrame into one
    Author per distinct committer name, preserving first-seen order."""
    by_name = {}
    order = []
    for _, row in full_info.iterrows():
        name = row["Committer Name"]
        commit = Commit(row["Commit Message"], row["HEXSHA"], row["Parent Commit"])
        if name in by_name:
            by_name[name].add_commit(commit)
        else:
            by_name[name] = Author(name, [commit])
            order.append(name)
    return [by_name[name] for name in order]
def make_graph(nodes):
    """Build an undirected weighted graph over authors.

    An edge links the author of a commit to the author of any child commit
    (one listing it among its parents); the edge weight counts such
    parent/child commit pairs between the two authors.
    """
    G = nx.Graph()
    # Index every commit hexsha by its author once, so resolving a parent
    # is O(1) instead of re-scanning every author's commit list for every
    # commit (the original was O(authors^2 * commits^2)).
    author_by_sha = {}
    for author in nodes:
        for commit in author.commits:
            author_by_sha[commit.hexsha] = author
    for child_author in nodes:
        for commit in child_author.commits:
            for parent_sha in commit.parents:
                parent_author = author_by_sha.get(parent_sha)
                if parent_author is None:
                    # Parent commit belongs to an author outside `nodes`.
                    continue
                if G.has_edge(parent_author, child_author):
                    G[parent_author][child_author]['weight'] += 1
                else:
                    G.add_edge(parent_author, child_author, weight=1)
    return G
nodes = get_authors()
G = make_graph(nodes)
pos = nx.spring_layout(G, iterations=100)
nx.draw(G, pos, font_size=8, with_labels = False)
# nx.draw_networkx_labels(G, pos);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex constants
Step12: Hardware Accelerators
Step13: Container (Docker) image
Step14: Machine Type
Step15: Tutorial
Step16: Train a model
Step17: Task.py contents
Step18: Write the Docker file contents
Step19: Build the container locally
Step20: Next, build the container.
Step21: Test the container locally
Step22: Register the custom container
Step23: Store training script on your Cloud Storage bucket
Step24: Prepare your custom job specification
Step25: Prepare your disk specification
Step26: Prepare your container specification
Step27: Define the worker pool specification
Step28: Assemble a job specification
Step29: Train the model
Step30: Now get the unique identifier for the custom job you created.
Step31: Get information on a custom job
Step32: Deployment
Step33: Load the saved model
Step34: Evaluate the model
Step35: Perform the model evaluation
Step36: Upload the model for serving
Step37: Get the serving function signature
Step38: Upload the model
Step39: Get Model resource information
Step40: Deploy the Model resource
Step41: Now get the unique identifier for the Endpoint resource you created.
Step42: Compute instance scaling
Step43: Deploy Model resource to the Endpoint resource
Step44: Make a online prediction request
Step45: Prepare the request content
Step46: Send the prediction request
Step47: Undeploy the Model resource
Step48: Cleaning up
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU)
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_job_client():
    """Return a Vertex AI JobService client bound to the regional endpoint."""
    client = aip.JobServiceClient(client_options=client_options)
    return client

def create_model_client():
    """Return a Vertex AI ModelService client bound to the regional endpoint."""
    client = aip.ModelServiceClient(client_options=client_options)
    return client

def create_endpoint_client():
    """Return a Vertex AI EndpointService client bound to the regional endpoint."""
    client = aip.EndpointServiceClient(client_options=client_options)
    return client

def create_prediction_client():
    """Return a Vertex AI PredictionService client bound to the regional endpoint."""
    client = aip.PredictionServiceClient(client_options=client_options)
    return client
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
# Scaling CIFAR10 data from (0, 255] to (0., 1.]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
datasets, info = tfds.load(name='cifar10',
with_info=True,
as_supervised=True)
return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()
# Build the Keras model
def build_and_compile_cnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=['accuracy'])
return model
# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
model.save(args.model_dir)
%%writefile custom/Dockerfile
FROM gcr.io/deeplearning-platform-release/tf2-cpu.2-1
WORKDIR /root
WORKDIR /
# Copies the trainer code to the docker image.
COPY trainer /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
TRAIN_IMAGE = "gcr.io/" + PROJECT_ID + "/cifar10:v1"
! docker build custom -t $TRAIN_IMAGE
! docker run $TRAIN_IMAGE --epochs=5
! docker push $TRAIN_IMAGE
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz
if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}
JOB_NAME = "_custom_container" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
EPOCHS = 20
STEPS = 100
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
]
container_spec = {
"image_uri": TRAIN_IMAGE,
"args": CMDARGS,
}
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": machine_spec,
"container_spec": container_spec,
"disk_spec": disk_spec,
}
]
if DIRECT:
job_spec = {"worker_pool_specs": worker_pool_spec}
else:
job_spec = {
"worker_pool_specs": worker_pool_spec,
"base_output_directory": {"output_uri_prefix": MODEL_DIR},
}
custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}
def create_custom_job(custom_job):
    """Submit *custom_job* (a CustomJob spec dict) to Vertex AI and return
    the created job resource, printing its key fields."""
    # PARENT is the module-level "projects/.../locations/..." path.
    response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
    print("name:", response.name)
    print("display_name:", response.display_name)
    print("state:", response.state)
    print("create_time:", response.create_time)
    print("update_time:", response.update_time)
    return response
# The full unique ID for the custom job
job_id = response.name
# The short numeric ID for the custom job
job_short_id = job_id.split("/")[-1]
print(job_id)
def get_custom_job(name, silent=False):
    """Fetch the custom job *name* (full resource path) and return it.

    When *silent* is True the job is returned without printing anything;
    otherwise its key fields are echoed first.
    """
    response = clients["job"].get_custom_job(name=name)
    if silent:
        return response
    print("name:", response.name)
    print("display_name:", response.display_name)
    print("state:", response.state)
    print("create_time:", response.create_time)
    print("update_time:", response.update_time)
    return response
response = get_custom_job(job_id)
while True:
response = get_custom_job(job_id, True)
if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_path_to_deploy = None
if response.state == aip.JobState.JOB_STATE_FAILED:
break
else:
if not DIRECT:
MODEL_DIR = MODEL_DIR + "/model"
model_path_to_deploy = MODEL_DIR
print("Training Time:", response.update_time - response.create_time)
break
time.sleep(60)
print("model_to_deploy:", model_path_to_deploy)
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
import numpy as np
from tensorflow.keras.datasets import cifar10
(_, _), (x_test, y_test) = cifar10.load_data()
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)
model.evaluate(x_test, y_test)
CONCRETE_INPUT = "numpy_inputs"
def _preprocess(bytes_input):
    """Decode one JPEG byte string to a float32 image resized to 32x32x3."""
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    # convert_image_dtype also rescales uint8 [0, 255] into float [0, 1].
    decoded = tf.image.convert_image_dtype(decoded, tf.float32)
    resized = tf.image.resize(decoded, size=(32, 32))
    return resized

@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    """Map _preprocess over a batch of JPEG byte strings and key the result
    by CONCRETE_INPUT so it matches the model's input tensor name."""
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
    )
    return {
        CONCRETE_INPUT: decoded_images
    }  # User needs to make sure the key matches model's input

@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
    """Serving signature: raw JPEG bytes in, model probabilities out.

    NOTE(review): references m_call, which the notebook defines in a later
    cell; tf.function traces lazily at save time, so this works as long as
    the cells run in order -- verify before reordering cells.
    """
    images = preprocess_fn(bytes_inputs)
    prob = m_call(**images)
    return prob
m_call = tf.function(model.call).get_concrete_function(
[tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)
tf.saved_model.save(
model, model_path_to_deploy, signatures={"serving_default": serving_fn}
)
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
    """Upload the SavedModel at *model_uri* as a Vertex AI Model served by
    the container *image_uri*, and return the new model's resource name.

    Blocks up to 180s on the long-running upload operation.
    """
    model = {
        "display_name": display_name,
        "metadata_schema_uri": "",
        "artifact_uri": model_uri,
        "container_spec": {
            "image_uri": image_uri,
            "command": [],
            "args": [],
            # Placeholder env var from the tutorial template; presumably
            # unused by the prebuilt serving image.
            "env": [{"name": "env_name", "value": "env_value"}],
            "ports": [{"container_port": 8080}],
            # Empty routes fall back to the container's defaults.
            "predict_route": "",
            "health_route": "",
        },
    }
    response = clients["model"].upload_model(parent=PARENT, model=model)
    print("Long running operation:", response.operation.name)
    upload_model_response = response.result(timeout=180)
    print("upload_model_response")
    print(" model:", upload_model_response.model)
    return upload_model_response.model
model_to_deploy_id = upload_model(
"cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
def get_model(name):
    """Fetch and print the Vertex AI Model resource identified by *name*."""
    response = clients["model"].get_model(name=name)
    print(response)
get_model(model_to_deploy_id)
ENDPOINT_NAME = "cifar10_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
    """Create a Vertex AI Endpoint named *display_name* and return the
    resulting Endpoint resource (blocks up to 300s on the operation)."""
    endpoint = {"display_name": display_name}
    response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
    print("Long running operation:", response.operation.name)
    result = response.result(timeout=300)
    print("result")
    print(" name:", result.name)
    print(" display_name:", result.display_name)
    print(" description:", result.description)
    print(" labels:", result.labels)
    print(" create_time:", result.create_time)
    print(" update_time:", result.update_time)
    return result
result = create_endpoint(ENDPOINT_NAME)
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
MIN_NODES = 1
MAX_NODES = 1
DEPLOYED_NAME = "cifar10_deployed-" + TIMESTAMP
def deploy_model(
    model, deployed_model_display_name, endpoint, traffic_split=None
):
    """Deploy *model* to *endpoint* and return the deployed model's id.

    Args:
        model: full resource name of the uploaded Model.
        deployed_model_display_name: display name for this deployment.
        endpoint: full resource name of the target Endpoint.
        traffic_split: optional traffic-split dict; defaults to sending
            100% of traffic to this deployment ({"0": 100}).

    Blocks on the long-running deploy operation before returning.
    """
    # Avoid a mutable default argument; build the default per call.
    if traffic_split is None:
        traffic_split = {"0": 100}
    # Module-level DEPLOY_* constants select the serving hardware.
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    deployed_model = {
        "model": model,
        "display_name": deployed_model_display_name,
        # MIN_NODES/MAX_NODES bound the autoscaling replica count.
        "dedicated_resources": {
            "min_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
            "machine_spec": machine_spec,
        },
        "disable_container_logging": False,
    }
    response = clients["endpoint"].deploy_model(
        endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
    )
    print("Long running operation:", response.operation.name)
    result = response.result()
    print("result")
    deployed_model = result.deployed_model
    print(" deployed_model")
    print(" id:", deployed_model.id)
    print(" model:", deployed_model.model)
    print(" display_name:", deployed_model.display_name)
    print(" create_time:", deployed_model.create_time)
    return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
test_image = x_test[0]
test_label = y_test[0]
print(test_image.shape)
import base64
import cv2
cv2.imwrite("tmp.jpg", (test_image * 255).astype(np.uint8))
bytes = tf.io.read_file("tmp.jpg")
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
def predict_image(image, endpoint, parameters_dict):
    """Send one base64-encoded JPEG *image* to *endpoint* and print the
    predictions returned by the deployed model.

    *serving_input* (module-level) is the name of the serving function's
    input tensor; *parameters_dict* is forwarded as-is (may be None).
    """
    # The format of each instance should conform to the deployed model's prediction input schema.
    instances_list = [{serving_input: {"b64": image}}]
    instances = [json_format.ParseDict(s, Value()) for s in instances_list]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters_dict
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    predictions = response.predictions
    print("predictions")
    for prediction in predictions:
        print(" prediction:", prediction)
predict_image(b64str, endpoint_id, None)
def undeploy_model(deployed_model_id, endpoint):
    """Undeploy the given deployed model from *endpoint* and print the
    long-running operation; an empty traffic_split removes its traffic."""
    response = clients["endpoint"].undeploy_model(
        endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
    )
    print(response)
undeploy_model(deployed_model_id, endpoint_id)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate city boundaries
Step2: Get zonal stats for each metro cluster
|
<ASSISTANT_TASK:>
Python Code:
# load shapefile of all admin areas / countries as geodataframe
gdf = gpd.read_file('data/geo/countries/countries_nf2.shp'); gdf.head(3)
# filter out countries not internationally recognized
country_filter1 = gdf['WB_A3'] != '-99'
# --- Pipeline: nighttime-lights rasters -> per-country shapefiles -> city
# --- polygons -> CartoDB intersection -> zonal stats.
# NOTE(review): `gdf` and `country_filter1` are defined earlier in the file;
# the commented-out calls are the expensive/stateful steps, left disabled so
# the notebook can be re-run without redoing them.
gdf = gdf.drop_duplicates(subset='WB_A3')
gdf = gdf[country_filter1].set_index('WB_A3')
# loop through rows of geodataframe and save each row as a country-specific shapefile in newly created dir
# shp_to_shps('data/geo/countries/shp', gdf)
# clip master raster from 2013 by each country shapefile to create country-level rasters
input_tif_path = 'data/geo/images/F182013.v4c_web.stable_lights.avg_vis.tif'
input_shp_dir = 'data/geo/countries/shp'
output_tif_dir = 'data/geo/countries/tif'
# Country codes as bytes (the raster helpers presumably expect encoded names).
countries = [x.encode('UTF-8') for x in gdf.index.values]
# raster_to_rasters(countries, input_tif_path, input_shp_dir, output_tif_dir)
# polygonize rasters and save to target directory
input_tif_dir = 'data/geo/countries/tif'
output_shp_dir = 'data/geo/countries/poly'
# polygonize(input_tif_dir, output_shp_dir, countries)
# filter and union countries, save to target directory
input_dir = 'data/geo/countries/poly'
output_dir = 'data/geo/cities/union'
# union_and_filter(input_dir, output_dir, countries)
# split multi-polygons into polygons
input_dir = 'data/geo/cities/union'
output_dir = 'data/geo/cities/split'
# split_multi_to_single_poly(input_dir, output_dir)
# Merge shapefiles in directory
input_dir = 'data/geo/cities/split'
output_dir = 'data/geo/cities/merge'
output_filename = 'merged.shp'
# merge_shapefiles(input_dir, output_dir, output_filename)
# set CRS of merged shapefile
input_path = 'data/geo/cities/merge/merged.shp'
crs = 'epsg:4326'
output_path = 'data/geo/cities/merge/merged_crs.shp'
# set_crs(input_path, crs, output_path)
# zip merged shapefiles
target_dir = 'data/geo/cities/merge'
shp_filename = 'merged_crs.shp'
zip_filename = 'merged_crs.zip'
shp_path = os.path.join(target_dir, shp_filename)
zip_path = os.path.join(target_dir, zip_filename)
zip_path = os.path.abspath(zip_path)
# Glob for all sidecar files sharing the shapefile's basename (.shp/.shx/.dbf/...).
shp_filename_no_ext = shp_filename[:-4]
glob_string = os.path.join(target_dir, shp_filename_no_ext) + '*'
list_of_shps = glob.glob(glob_string)
list_of_shps = [os.path.abspath(x) for x in list_of_shps]
#zip_files(list_of_shps, zip_path)
# Placeholder credentials -- replace with real CartoDB account values before running.
cdb_api_key = 'your_api_key'
cdb_domain = 'your_username'
c = CartoDBAPIKey(cdb_api_key, cdb_domain)
url = 'http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/10m/cultural/ne_10m_populated_places_simple.zip'
# upload populated places shapefiles to cartodb
fi_1 = URLImport(url, c, privacy='public')
# upload zipped merged_crs shapefiles to cartodb
file_to_import = 'data/geo/cities/merge/merged_crs.zip'
fi_2 = FileImport(file_to_import, c, privacy='public')
#fi_1.run()
#fi_2.run()
#fi_1.success, fi_2.success
# call cartodb sql api to get polygons that intersect with cites, format as geojson
#intersection = c.sql(sql = 'with pop as (select nameascii, adm0_a3, pop_max, the_geom from ne_10m_populated_places_simple where pop_max > 1000000) select merged_crs.cartodb_id, pop.nameascii, pop.adm0_a3, pop.pop_max, merged_crs.the_geom from pop, merged_crs where ST_Within(pop.the_geom, merged_crs.the_geom)', format='geojson')
# dump geojson output from cartodb into file
dir_intersect = 'data/geo/cities/intersect'
filename = 'cities.geojson'
path = os.path.join(dir_intersect, filename)
#rm_and_mkdir(dir_intersect)
#with open(path, 'w') as outfile:
#json.dump(intersection, outfile)
# write geojson to shapefile in same direcory: these are the metro clusters
shp_path = 'data/geo/cities/intersect/cities.shp'
geojson_path = 'data/geo/cities/intersect/cities.geojson'
#subprocess.check_call(['ogr2ogr', '-F', 'ESRI Shapefile', shp_path, geojson_path, 'OGRGeoJSON'])
# load cities shapefile and get zonal stats
tif_dir = 'data/geo/images'
input_shp_path = 'data/geo/cities/intersect/cities.shp'
#gdf = zonal_to_shp(tif_dir, shp_path)
# dump to pickle
#with open('data/geo/pickles/zonal_stats_m.pickle', 'wb') as f:
#pickle.dump(gdf, f)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The original data is rotated by 90 degrees, so the data-loading function uses a transpose to fix it.
Step2: feed forward prediction
Step3: accuracy
|
<ASSISTANT_TASK:>
Python Code:
# Feed-forward prediction through a pre-trained two-layer network (ex3):
# 400 inputs -> 25 hidden units -> 10 output classes.
theta1, theta2 = nn.load_weight('ex3weights.mat')
theta1.shape, theta2.shape
X, y = nn.load_data('ex3data1.mat',transpose=False)
X = np.insert(X, 0, values=np.ones(X.shape[0]), axis=1) # intercept
X.shape, y.shape
a1 = X
z2 = a1 @ theta1.T # (5000, 401) @ (25,401).T = (5000, 25)
z2.shape
z2 = np.insert(z2, 0, values=np.ones(z2.shape[0]), axis=1)  # bias unit for hidden layer
a2 = lr.sigmoid(z2)
a2.shape
z3 = a2 @ theta2.T
z3.shape
a3 = lr.sigmoid(z3)
a3
y_pred = np.argmax(a3, axis=1) + 1  # numpy is 0 base index, +1 for matlab convention
y_pred.shape
# Bug fix: report accuracy of predictions against the TRUE labels `y`.
# The original compared `y_pred` to itself, which trivially reports 100%.
print(classification_report(y, y_pred))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Languages Coverage
Step2: Downloading Necessary Models
Step4: Example
Step5: We can query all the tagged words
Step6: Command Line Interface
|
<ASSISTANT_TASK:>
Python Code:
from polyglot.transliteration import Transliterator
from polyglot.downloader import downloader
print(downloader.supported_languages_table("transliteration2"))
%%bash
polyglot download embeddings2.en pos2.en
from polyglot.text import Text
blob = We will meet at eight o'clock on Thursday morning.
text = Text(blob)
for x in text.transliterate("ar"):
print(x)
!polyglot --lang en tokenize --input testdata/cricket.txt | polyglot --lang en transliteration --target ar | tail -n 30
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the MNIST dataset, flatten the images, convert the class labels, and scale the data.
Step2: I. OverFeat adaptation of AlexNet (2012)
Step3: As you can imagine, training this model (even on MNIST-10) is quite time consuming. I'll run just one Epoch with 10 samples to show how it works.
Step4: The true power of this model really comes out when it is used on a larger corpus of images, such as ILSVRC and MS COCO, with images having a larger spatial size.
Step5: We then load the weights of the model from a file (you can download this from the course website; it is not small, coming in at about half a gigabyte). We then have to compile the model, even though we have no intention of actually training it. This is because the compilation in part sets the forward propigation code, which we will need to do predictions.
Step6: We will also load some metadata, that gives class labels to the output
Step7: Now lets read in an image of a lion
Step8: And now predict the class label from the VGG-19 model
Step9: A relatively impressive result for an out of sample image!
Step10: IV. Batch Normalization
Step11: V. Residual block - as in ResNet (2015)
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.datasets import mnist, cifar10
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.optimizers import SGD, RMSprop
from keras.utils import np_utils
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from PIL import Image
# Fetch MNIST and prepare it for a channels-first convnet: add a single
# channel axis, scale pixel values to [0, 1], and one-hot encode the labels.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1, 1, 28, 28).astype('float32') / 255
X_test = X_test.reshape(-1, 1, 28, 28).astype('float32') / 255
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# OverFeat-style adaptation of AlexNet for 1x28x28 MNIST input (old Keras
# 1.x API: Convolution2D(filters, rows, cols)). Five conv blocks followed
# by three fully connected layers with dropout.
model = Sequential()
# Layer 1
model.add(Convolution2D(96, 11, 11, input_shape = (1,28,28), border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 2
model.add(Convolution2D(256, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 3
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, border_mode='same'))
model.add(Activation('relu'))
# Layer 4
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(1024, 3, 3, border_mode='same'))
model.add(Activation('relu'))
# Layer 5
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(1024, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 6
model.add(Flatten())
model.add(Dense(3072, init='glorot_normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Layer 7
model.add(Dense(4096, init='glorot_normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Layer 8
model.add(Dense(10, init='glorot_normal'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=RMSprop())
# Demo only: one epoch over 10 samples -- the full model is too slow to
# train here; see the text above.
model.fit(X_train[:10], Y_train[:10], batch_size=1, nb_epoch=1,
          verbose=1, show_accuracy=True)
# VGG-19 architecture: sixteen 3x3 conv layers (each preceded by 1-pixel
# zero padding) interleaved with five 2x2 max-pools, then the fully
# connected classifier head. The config list drives construction: integers
# are conv filter counts, 'M' marks a max-pool. The resulting layer
# sequence is identical to writing each `model.add(...)` out by hand,
# which is required for the pre-trained weights loaded below to line up.
model = Sequential()
vgg19_cfg = [64, 64, 'M', 128, 128, 'M',
             256, 256, 256, 256, 'M',
             512, 512, 512, 512, 'M',
             512, 512, 512, 512, 'M']
is_first = True
for entry in vgg19_cfg:
    if entry == 'M':
        model.add(MaxPooling2D((2,2), strides=(2,2)))
        continue
    if is_first:
        # Only the very first layer declares the input shape.
        model.add(ZeroPadding2D((1,1), input_shape=(3,224,224)))
        is_first = False
    else:
        model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(entry, 3, 3, activation='relu'))
# Classifier head: two 4096-unit FC layers with dropout, 1000-way softmax.
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
# Load pre-trained ImageNet weights; the layer sequence built above must
# match the architecture the weights were saved from exactly.
model.load_weights("../../../class_data/keras/vgg19_weights.h5")
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
# Compiling is required before predict() even though we never train here.
model.compile(optimizer=sgd, loss='categorical_crossentropy')
# Human-readable labels for the 1000 ImageNet synsets (one per output unit).
synsets = []
with open("../../../class_data/keras/synset_words.txt", "r") as f:
    synsets += f.readlines()
synsets = [x.replace("\n","") for x in synsets]
im = Image.open('img/lion.jpg').resize((224, 224), Image.ANTIALIAS)
plt.figure(figsize=(4, 4))
plt.axis("off")
plt.imshow(im)
im = np.array(im).astype(np.float32)
# scale the image, according to the format used in training
# (per-channel mean subtraction; the constants come from the VGG training setup)
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
# Channels-first (3, 224, 224) plus a leading batch axis, as the model expects.
im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)
out = model.predict(im)
# Print the ten highest-probability classes.
for index in np.argsort(out)[0][::-1][:10]:
    print("%01.4f - %s" % (out[0][index], synsets[index].replace("\n","")))
# Inception-style multi-branch module using the old Keras Graph API:
# four parallel branches over the same input (1x1 conv; 1x1->3x3 convs;
# 1x1->5x5 convs; 3x3 max-pool->1x1 conv), flattened and concatenated
# before two dense layers. Node names: n<layer><branch>.
model = Graph()
model.add_input(name='n00', input_shape=(1,28,28))
# layer 1
model.add_node(Convolution2D(64,1,1, activation='relu'), name='n11', input='n00')
model.add_node(Flatten(), name='n11_f', input='n11')
model.add_node(Convolution2D(96,1,1, activation='relu'), name='n12', input='n00')
model.add_node(Convolution2D(16,1,1, activation='relu'), name='n13', input='n00')
model.add_node(MaxPooling2D((3,3),strides=(2,2)), name='n14', input='n00')
# layer 2
model.add_node(Convolution2D(128,3,3, activation='relu'), name='n22', input='n12')
model.add_node(Flatten(), name='n22_f', input='n22')
model.add_node(Convolution2D(32,5,5, activation='relu'), name='n23', input='n13')
model.add_node(Flatten(), name='n23_f', input='n23')
model.add_node(Convolution2D(32,1,1, activation='relu'), name='n24', input='n14')
model.add_node(Flatten(), name='n24_f', input='n24')
# output layer: concatenate the four flattened branches, then classify.
model.add_node(Dense(1024, activation='relu'), name='layer4',
               inputs=['n11_f', 'n22_f', 'n23_f', 'n24_f'], merge_mode='concat')
model.add_node(Dense(10, activation='softmax'), name='layer5', input='layer4')
model.add_output(name='output1',input='layer5')
model.compile(loss={'output1':'categorical_crossentropy'}, optimizer=RMSprop())
# Demo fit on 100 samples only.
model.fit({'n00':X_train[:100], 'output1':Y_train[:100]}, nb_epoch=1, verbose=1)
# LeNet-style network with batch normalization inserted after each pooling
# layer (before the ReLU activation).
model = Sequential()
model.add(Convolution2D(6, 5, 5, border_mode='valid', input_shape = (1, 28, 28)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Convolution2D(16, 5, 5, border_mode='valid'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Convolution2D(120, 1, 1, border_mode='valid'))
model.add(Flatten())
model.add(Dense(84))
model.add(Activation("relu"))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=RMSprop())
model.fit(X_train, Y_train, batch_size=32, nb_epoch=20,
          verbose=1, show_accuracy=True, validation_data=(X_test, Y_test))
# Simple residual block via the Graph API: 'top1' sums the skip connection
# ('input2') with the output of two dense layers ('middle2'), as in ResNet.
model = Graph()
model.add_input(name='input0', input_shape=(1,28,28))
model.add_node(Flatten(), name='input1', input='input0')
model.add_node(Dense(50), name='input2', input='input1')
model.add_node(Dense(50, activation='relu'), name='middle1', input='input2')
model.add_node(Dense(50, activation='relu'), name='middle2', input='middle1')
# merge_mode='sum' implements the identity shortcut.
model.add_node(Dense(512, activation='relu'), name='top1',
               inputs=['input2', 'middle2'], merge_mode='sum')
model.add_node(Dense(10, activation='softmax'), name='top2', input='top1')
model.add_output(name='top3',input='top2')
model.compile(loss={'top3':'categorical_crossentropy'}, optimizer=RMSprop())
model.fit({'input0':X_train, 'top3':Y_train}, nb_epoch=25, verbose=1,
          validation_data={'input0':X_test, 'top3':Y_test})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle.
Step2: Dataset Parameters
Step3: For information on the included passband-dependent parameters (not mentioned below), see the section on the lc dataset (these are used only to compute fluxes when rv_method is 'flux-weighted')
Step4: rvs
Step5: sigmas
Step6: compute_times / compute_phases
Step7: Compute Options
Step8: rv_method
Step9: If rv_method is set to 'dynamical' then the computed radial velocities are simply the z-velocities of the centers of mass of each component. In this case, only the dynamical options are relevant. For more details on these, see the section on the orb dataset.
Step10: See the Gravitational Redshift example for more details on the influence this parameter has on radial velocities.
Step11: Plotting
Step12: Since these are the only two columns available in the synthetic model, the only other options is to plot in phase instead of time.
Step13: In system hierarchies where there may be multiple periods, it is also possible to determine whose period to use for phasing.
Step14: Mesh Fields
Step15: These new columns are stored with the rv's dataset tag, but with the mesh model-kind.
Step16: Any of these columns are then available to use as edge or facecolors when plotting the mesh (see the section on the MESH dataset).
Step17: rvs
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
from phoebe import u # units
logger = phoebe.logger()
# Default detached binary system with an attached radial-velocity dataset.
b = phoebe.default_binary()
b.add_dataset('rv')
# Inspect the dataset parameters (check_visible=False also shows hidden ones).
print(b.get_dataset(kind='rv', check_visible=False))
print(b.get_parameter(qualifier='times', component='primary'))
b.set_value('times', component='primary', value=[0])
print(b.get_parameter(qualifier='rvs', component='primary'))
print(b.get_parameter(qualifier='sigmas', component='primary'))
print(b.get_parameter(qualifier='compute_times'))
print(b.get_parameter(qualifier='compute_phases', context='dataset'))
print(b.get_parameter(qualifier='phases_t0'))
# Compute options relevant to radial velocities.
print(b.get_compute())
print(b.get_parameter(qualifier='rv_method', component='primary'))
print(b.get_parameter(qualifier='rv_grav', component='primary'))
# Compute the model over one orbital cycle (irradiation disabled for speed).
b.set_value_all('times', phoebe.linspace(0,1,101))
b.run_compute(irrad_method='none')
print(b.filter(context='model').twigs)
print(b.get_parameter(qualifier='times', component='primary', kind='rv', context='model'))
print(b.get_parameter(qualifier='rvs', component='primary', kind='rv', context='model'))
# Plot in time, then in phase (optionally phased on a specific component's period).
afig, mplfig = b.plot(show=True)
afig, mplfig = b.plot(x='phases', show=True)
print(b.filter(qualifier='period').components)
afig, mplfig = b.plot(x='phases:binary', show=True)
# Expose per-element rvs on a mesh dataset so they can color the surface plot.
b.add_dataset('mesh', times=[0], dataset='mesh01')
print(b.get_parameter(qualifier='columns').choices)
b.set_value('columns', value=['rvs@rv01'])
b.run_compute(irrad_method='none')
print(b.get_model().datasets)
print(b.filter(dataset='rv01', kind='mesh', context='model').twigs)
afig, mplfig = b.filter(kind='mesh').plot(fc='rvs', ec='None', show=True)
print(b.get_parameter(qualifier='rvs',
                      component='primary',
                      dataset='rv01',
                      kind='mesh',
                      context='model'))
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Read pickle files
Step2: 3. Group dataset
Step3: 4. Label the data
Step4: 5. Convert one-hot code
Step5: 6. Save data
Step6: 7. Pick some data for batch normalization inference
|
<ASSISTANT_TASK:>
Python Code:
from six.moves import cPickle as pickle
import matplotlib.pyplot as plt
import os
from random import sample, shuffle
import numpy as np
# Load every per-person pickle under 'pickle/' into one dict keyed by
# filename; each pickle stores its images under the 'image' key.
files = os.listdir('pickle')
dataset = dict()
for file_name in files:
    with open('pickle/'+file_name, 'rb') as f:
        save = pickle.load(f)
        dataset.setdefault(file_name, save['image'])
        del save  # free the loaded pickle payload before the next file
# Hold out 2 people from each third of the 24 subjects (1-8, 9-16, 17-24)
# for validation/test; the remaining 18 form the training set.
# Bug fix: the last third was sampled from xrange(16,25), overlapping the
# middle third at 16 (which could yield duplicate ids); it should start at 17.
v_t = sample(xrange(1,9),2) + sample(xrange(9,17),2) + sample(xrange(17,25),2)
shuffle(v_t)
valid = v_t[:3]
test = v_t[3:]
train = list(set(range(1,25)) - set(v_t))
def get_names(ls):
    # Map person ids to their directory-style names, e.g. 3 -> 'p3'.
    return ['p{0}'.format(n) for n in ls]
# Convert the numeric splits into pickle-file key names and report them.
train = get_names(train)
valid = get_names(valid)
test = get_names(test)
print('train',train)
print('valid',valid)
print('test',test)
def add_dic(x,y):
    """Merge two dicts into a new dict; on key clashes, values from ``y`` win.

    The original ``dict(x.items() + y.items())`` only works on Python 2
    (where ``items()`` returns a list); copy-then-update is the equivalent,
    version-agnostic form.
    """
    merged = dict(x)
    merged.update(y)
    return merged
def get_data(name_list):
    # Merge the per-person dicts for the given names into one dict.
    # NOTE(review): `reduce` is the Python 2 builtin (functools.reduce on
    # Python 3); `dataset.get(name, False)` would make add_dic fail loudly
    # on a missing name rather than silently skipping it.
    data = [dataset.get(name,False) for name in name_list]
    return reduce(add_dic,data)
# the dictionary is {name:numpy}; for example, one of the names is '30-8-1-gball-288.png'
train_dataset = get_data(train)
valid_dataset = get_data(valid)
test_dataset = get_data(test)
# Object classes whose orientation label is forced to 0.
non_orientations = ['empty','cup','tball','pball','gball']
image_size = 50  # images are image_size x image_size depth maps

def label_data(data):
    """Split a ``{filename: image}`` dict into parallel label/value lists.

    Filenames look like ``'30-8-1-gball-288.png'``: the second-to-last
    dash-separated field is the object class and the last one the
    orientation in degrees.

    Returns ``(objects, orientations, values)`` where each value is a
    float32 array reshaped to ``(image_size, image_size, 1)``.
    """
    objects = []
    orientations = []
    values = []
    # .items() (not the Python-2-only .iteritems()) so this also runs on Python 3.
    for name, value in data.items():
        fields = name.split('.')[0].split('-')
        obj = fields[-2]  # object name
        ori = fields[-1]  # orientation
        objects.append(obj)
        if obj in non_orientations:
            orientations.append(0)
        elif obj == 'gstick':
            # gstick variants '1' and '3' are treated as orientation-free,
            # mirroring the original special case.
            if fields[2] in ['1','3']:
                orientations.append(0)
            else:
                orientations.append(int(ori))
        else:
            orientations.append(int(ori))
        values.append(value.reshape(image_size, image_size, 1).astype(np.float32))
    return objects, orientations, values
# Produce labels for all three splits, then define the class-name <-> index
# mappings used for one-hot encoding below.
train_objects, train_orientations, train_values = label_data(train_dataset)
valid_objects, valid_orientations, valid_values = label_data(valid_dataset)
test_objects, test_orientations, test_values = label_data(test_dataset)
object2value = {'empty':0,'duck':1,'cup':2,'sponge':3,'tball':4,'pball':5,'gball':6,'gstick':7,'nerf':8,'calc':9,'stapler':10}
value2object = dict((value,name) for name,value in object2value.items())
# The 20 possible orientations, in 18-degree steps.
orientations = [18*x for x in range(20)]
def convert_objects(objects, mapping=None):
    """One-hot encode a list of object-name labels.

    ``mapping`` maps each name to a class index; it defaults to the
    module-level ``object2value`` so existing calls are unchanged.
    Returns a float32 array of shape ``(len(objects), len(mapping))``.
    """
    if mapping is None:
        mapping = object2value
    obj_values = np.asarray([mapping[obj] for obj in objects])
    return (np.arange(len(mapping)) == obj_values[:,None]).astype(np.float32)
def convert_orientations(orientations, step=18):
    """One-hot encode orientations (multiples of ``step`` degrees) into 10 bins.

    ``step`` defaults to 18 so existing calls are unchanged. Floor division
    (``//``) keeps integer semantics on both Python 2 and 3; the original
    ``/`` only floors on Python 2.
    Returns a float32 array of shape ``(len(orientations), 10)``.
    """
    ori_values = np.asarray(orientations) // step % 10
    return (np.arange(10) == ori_values[:,None]).astype(np.float32)
# One-hot encode the labels and stack the image lists into float32 arrays
# for each split (trailing underscore marks the encoded versions).
train_objects_ = convert_objects(train_objects)
valid_objects_ = convert_objects(valid_objects)
test_objects_ = convert_objects(test_objects)
train_orientations_ = convert_orientations(train_orientations)
valid_orientations_ = convert_orientations(valid_orientations)
test_orientations_ = convert_orientations(test_orientations)
train_values_ = np.asarray(train_values).astype(np.float32)
valid_values_ = np.asarray(valid_values).astype(np.float32)
test_values_ = np.asarray(test_values).astype(np.float32)
# Pickle all encoded splits plus the label mappings into one file and
# report its size.
data_file = 'depth_data'
with open(data_file,'wb') as f:
    save={
        'train_orientations':train_orientations_,
        'valid_orientations':valid_orientations_,
        'test_orientations':test_orientations_,
        'train_objects':train_objects_,
        'valid_objects':valid_objects_,
        'test_objects':test_objects_,
        'train_values':train_values_,
        'valid_values':valid_values_,
        'test_values':test_values_,
        'object2value':object2value,
        'value2object':value2object
    }
    pickle.dump(save,f)
    f.close()  # redundant: the with-block already closes f
statinfo = os.stat(data_file)
file_size = float(statinfo.st_size)/1000
print('Compressed data size: %0.1fkB' % file_size)
def randomize(dataset, classes, angles):
    """Shuffle the three arrays in lockstep with one shared random permutation."""
    order = np.random.permutation(classes.shape[0])
    return dataset[order], classes[order], angles[order]
# Shuffle the training split, then pickle the first 100 images as a small
# fixed sample (used for batch-norm inference statistics).
train_dataset, train_classes, train_angles = randomize(train_values_, train_objects_, train_orientations_)
small_data = train_dataset[0:100,:,:,:]
with open('small_data','wb') as f:
    save={
        'small_data':small_data,
    }
    pickle.dump(save,f,pickle.HIGHEST_PROTOCOL)
    f.close()  # redundant: the with-block already closes f
##test: prepend one training image to the sample and check the batch shape
image = train_dataset[11,:,:,:].reshape(-1,image_size, image_size,1)
image = np.append(image,small_data,axis=0)
print(image.shape)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What problem does pandas solve?
Step2: Load a data set
Step3: pandas can load a lot more than CSVs; this tutorial shows how pandas can read Excel, SQL, and other formats:
Step4: Strip spaces in columns
Step5: for more info on lambda and map
Step6: Find nulls
Step7: Fill nulls
Step8: Selecting rows and columns
Step9: Differences between .loc, .iloc, and .ix
Step10: Rename columns
Step11: Boolean
Step12: Groupby
Step13: plotting with pandas
Step14: Practice Exercises
Step15: Extra practice
Step16: http
|
<ASSISTANT_TASK:>
Python Code:
# imports a library 'pandas', names it as 'pd'
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# enables inline plots, without it plots don't show up in the notebook
%matplotlib inline
# various options in pandas
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
pd.set_option('display.precision', 3)
# download the data and name the columns
cols = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'ethnicity',
'gender', 'capital_gain', 'capital_loss', 'hours_per_week',
'country_of_origin', 'income']
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
names = cols)
# we can see there are no null values
# columns with numberical values are type int64, no need to set data type
df.info()
# to view the first 5 or specify with ex: .head(10)
df.head()
# there's a space before each string in this data
df.education.unique()
# looks like it's in every object column
df.workclass.unique()
# loop through each object (string) column and strip the leading spaces
for col in df:
    if df[col].dtype == 'O':
        df[col] = df[col].map(lambda x: x.strip(' '))
# Here's a break down of what that for loop is doing
# loops through df and gets the column names
for col in df:
    print col
# gets the column type
df.education.dtype
# if True then applys the map function
df.education.dtype == object
# strip function: removes the given characters from both ends of a string
x = ' string'
x.strip(' ')
# lambda creates a 'throw away' or 'anonymous' function
strip_string = lambda x: x.strip(' ')
strip_string(' string')
# same as this
def strip_string2(x):
    """Named equivalent of the lambda: trim space characters from both ends."""
    return x.strip(' ')
strip_string2(' string')
# map applies the function to each item in the data frame column so
df[col].map(lambda x: x.strip(' '))
# does the same thing as
df['workclass'].map(strip_string2)
# but in the first case we don't have to define and name a function
df.education.value_counts()
df.hours_per_week.mean()
# summary statistics for the numeric columns
df[['age', 'capital_gain', 'capital_loss', 'hours_per_week']].describe()
# as we saw with df.info() there are no nulls...
# but if there were this would find the rows where age is null
df[df.age.isnull()]
# you could drop all those rows with
df_no_nulls = df[df.age.notnull()]
# small demo frame with one NaN to show the fillna options
null_df = pd.DataFrame([1,2,4,np.nan], columns = ['column1'])
null_df
# you can also fill nulls with a value or string
null_df.column1.fillna(1000)
null_df.column1.fillna(null_df.column1.median())
null_df.column1.fillna('string')
# select a row
df.iloc[0]
# select a range of rows
df.iloc[10:15]
# last 2 rows
df.iloc[-2:]
# selecting every other row in columns 3-5
df.iloc[::2, 2:5].head()
# Fix: the column is named 'relationship' (see the `cols` list above);
# the trailing space in 'relationship ' would raise a KeyError.
df.loc[0:2, ['age', 'relationship']]
# pd.DataFrame lets you turn series, arrays, lists, and more into data frame structures
# (second positional argument is the index; note it is unsorted: 11, 13, 12)
df_index = pd.DataFrame([[1,2,3,4],[2,4,6,8],[3,5,7,9]], [11,13,12], columns = ['A', 'C', 'D', 'B'])
df_index
# iloc indexes by position, not by the labels in the index
df_index.iloc[0:1]
# with loc both the start and the stop are included
df_index.loc[11:12]
# select columns by position
df_index.iloc[:,0:1]
# or by label
df_index.loc[:,'A':'D']
# ix: primarily label based, but will fall back to integer positional access
# (NOTE: .ix is deprecated in newer pandas; prefer .loc / .iloc)
df_index.ix[:,'A':'C']
# ix: primarily label based, but will fall back to integer positional access
df_index.ix[:,0:2]
df.columns
# replace a column name (inplace=False returns a renamed copy, df is unchanged)
df.rename(columns = {'native_country' : 'country_of_origin'}, inplace = False)
df.native_country.unique()
# boolean row selection
df[df.native_country == 'United-States'].head()
# combine conditions with & (parentheses are required)
df[(df.native_country != 'United-States') & (df.education_num > 9)].head()
df[(df.native_country != 'United-States') & (df.education_num > 9)].income.value_counts()
# How to groupby column and apply a function like sum, count, or mean
df.groupby(['relationship']).mean()
# To groupby multiple columns with multiple functions attached
df.groupby(['income', 'native_country']).age.agg(['count', 'mean'])
# grouped in order of which column is listed first
# combine groupby with boolean
df[df.native_country != 'United-States'].groupby(['education']).hours_per_week.mean()
df.age.hist(bins = 18);
# split the histograms by another column (with relatively few unique values)
df.hours_per_week.hist(by = df.income, bins = 25, figsize = (10,5));
# use value_counts() and a bar plot
df['workclass'].value_counts().plot(kind = 'bar')
df.boxplot(['age'], by = 'relationship');
df.plot(kind='scatter',
        x = 'age',
        y ='hours_per_week',
        alpha = .25,
        figsize = (10,5))
# how many males and females are in this data set
# plot the total number of people in each occupation
# what is the lowest average age of an occupation
# create a boxplot of hours per week by education level
# create a new column for income where >50K = 1 and <=50K = 0
# hint... http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.replace.html
# find which "native_country" has the highest percent of people earning >50K
# visualize what percent of people earn over 50K by education level
# make a hexbin plot
# check out plotting with Seaborn
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Results that can be accessed via scan_results
Step2: e.g. The first 10 data points for the scan results
Step3: Results can be saved using the default path as discussed in #Saving and default directories# with the save_results method
Step4: Or they can be saved to a specified location
Step5: Finally, a ScanFig object can be created using the plot method
Step6: ScanFig
Step7: The Figure shown above is empty - to show lines we need to click on the buttons. First we will click on the Flux Rates button which will allow any of the lines that fall into the category Flux Rates to be enabled. Then we click the other buttons
Step8: .. note
Step9: In the example below we set the Flux Rates visibility to False, but we set the J_R1 line visibility to True. Finally we use the show method instead of interact to display the figure.
Step10: The figure axes can also be adjusted via the adjust_figure method. Recall that the Vf2 scan was performed for a logarithmic scale rather than a linear scale. We will therefore set the x axis to log and its minimum value to 1. These settings are applied by clicking the Apply button.
Step11: The underlying matplotlib objects can be accessed through the fig and ax fields for the figure and axes, respectively. This allows for manipulation of the figures using matplotlib's functionality.
Step12: Finally the plot can be saved using the save method (or equivalently by pressing the save button) without specifying a path where the file will be saved as an svg vector image to the default directory as discussed under #Saving and default directories#
Step13: A file name together with desired extension (and image format) can also be specified
Step14: Tables
Step15: By default floats are all formatted according to the argument float_fmt which defaults to %.2f (using the standard Python formatter string syntax). A formatter function can be passed to as the formatter argument which allows for more customisation.
Step16: The constructed formatter takes a number (e.g. float, int, etc.) as argument and returns a formatter string according to the previously setup parameters.
Step17: Using this formatter with the previously constructed list_of_lists lead to a differently formatted html representation of the data
Step18: Graphic Representation of Metabolic Networks
Step19: Unless a layout has been previously defined, the species and reaction nodes will be placed randomly. Nodes are snap to an invisible grid.
Step20: A layout file for the example_model is included (see link for details) and can be loaded by specifying the location of the layout file on the disk during ModelGraph instantiation.
Step21: Clicking the Save Layout button saves this layout to the ~/Pysces/example_model/model_graph or C
|
<ASSISTANT_TASK:>
Python Code:
# PySCeS model instantiation using the `example_model.py` file
# with name `mod`
mod = pysces.model('example_model')
mod.SetQuiet()
# Parameter scan setup and execution
# Here we are changing the value of `Vf2` over logarithmic
# scale from `log10(1)` (or 0) to log10(100) (or 2) for a
# 100 points.
mod.scan_in = 'Vf2'
mod.scan_out = ['J_R1','J_R2','J_R3']
mod.Scan1(numpy.logspace(0,2,100))
# Instantiation of `Data2D` object with name `scan_data`
# (the scanned parameter becomes the first column).
column_names = [mod.scan_in] + mod.scan_out
scan_data = psctb.utils.plotting.Data2D(mod=mod,
                                        column_names=column_names,
                                        data_array=mod.scan_res)
# Each key represents a field through which results can be accessed
scan_data.scan_results.keys()
# First 10 rows of the raw scan array.
scan_data.scan_results.scan_results[:10,:]
# Save to the default location (the PySCeS model output directory).
scan_data.save_results()
# This path leads to the Pysces root folder
data_file_name = '~/Pysces/example_mod_Vf2_scan.csv'
# Correct path depending on platform - necessary for platform independent scripts
if platform == 'win32':
    data_file_name = psctb.utils.misc.unix_to_windows_path(data_file_name)
else:
    data_file_name = path.expanduser(data_file_name)
scan_data.save_results(file_name=data_file_name)
# Instantiation of `ScanFig` object with name `scan_figure`
scan_figure = scan_data.plot()
# interact() displays the figure with its category/line toggle buttons.
scan_figure.interact()
#remove_next
# To avoid duplication - do not run #ex
display(Image(path.join(notebook_dir,'images','scan_fig_1.png'))) #ex
# The four method calls below are equivalent to clicking the category buttons
# scan_figure.toggle_category('Flux Rates',True)
# scan_figure.toggle_category('J_R1',True)
# scan_figure.toggle_category('J_R2',True)
# scan_figure.toggle_category('J_R3',True)
scan_figure.interact()
#remove_next
# To avoid duplication - do not run #ex
display(Image(path.join(notebook_dir,'images','scan_fig_2.png'))) #ex
# Available line and category names for programmatic toggling.
print 'Line names      : ', scan_figure.line_names
print 'Category names  : ', scan_figure.category_names
# Hide the whole category but re-enable a single line, then show statically.
scan_figure.toggle_category('Flux Rates',False)
scan_figure.toggle_line('J_R1',True)
scan_figure.show()
# Opens the axis-adjustment widget (e.g. set the x axis to log, min to 1).
scan_figure.adjust_figure()
#remove_next
# To avoid duplication - do not run #ex
display(Image(path.join(notebook_dir,'images','scan_fig_3.png'))) #ex
# The underlying matplotlib figure/axes are exposed for direct manipulation.
scan_figure.fig.set_size_inches((6,4))
scan_figure.ax.set_ylabel('Rate')
scan_figure.line_names
scan_figure.show()
# Save as SVG to the default directory, then to an explicit PNG path.
scan_figure.save()
# This path leads to the Pysces root folder
fig_file_name = '~/Pysces/example_mod_Vf2_scan.png'
# Correct path depending on platform - necessary for platform independent scripts
if platform == 'win32':
    fig_file_name = psctb.utils.misc.unix_to_windows_path(fig_file_name)
else:
    fig_file_name = path.expanduser(fig_file_name)
scan_figure.save(file_name=fig_file_name)
list_of_lists = [['a','b','c'],[1.2345,0.6789,0.0001011],[12,13,14]]
psctb.utils.misc.html_table(list_of_lists,
caption='Example')
formatter = psctb.utils.misc.formatter_factory(min_val=0.1,
max_val=10,
default_fmt='%.1f',
outlier_fmt='%.2e')
print formatter(0.09) # outlier
print formatter(0.1) # min for default
print formatter(2) # within range for default
print formatter(9) # max int for default
print formatter(10) # outlier
psctb.utils.misc.html_table(list_of_lists,
caption='Example',
formatter=formatter, # Previously constructed formatter
first_row_headers=True) # The first row can be set as the header
model_graph = psctb.ModelGraph(mod)
model_graph.show()
# To avoid duplication - do not run #ex
display(Image(path.join(notebook_dir,'images','model_graph_1.png'))) #ex
# This path leads to the provided layout file
path_to_layout = '~/Pysces/psc/example_model_layout.dict'
# Correct path depending on platform - necessary for platform independent scripts
if platform == 'win32':
path_to_layout = psctb.utils.misc.unix_to_windows_path(path_to_layout)
else:
path_to_layout = path.expanduser(path_to_layout)
model_graph = psctb.ModelGraph(mod, pos_dic=path_to_layout)
model_graph.show()
# To avoid duplication - do not run #ex
display(Image(path.join(notebook_dir,'images','model_graph_2.png'))) #ex
model_graph = psctb.ModelGraph(mod)
model_graph.show()
# To avoid duplication - do not run #ex
display(Image(path.join(notebook_dir,'images','model_graph_3.png'))) #ex
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Accessing netcdf file via thredds
Step3: Calculating wind speed in one grid cell over the prognosis time
Step4: Plotting wind speed
Step5: Cluster wind speeds depending on the standard variation of the wind direction (upper and lower limits) before applying a wind speed threshold.
Step6: See more at
Step7: Defining drift potential
Step8: Comparison to Föhn's model
Step9: Using real data from AROME model
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import netCDF4
import numpy as np
import pylab as plt
plt.rcParams['figure.figsize'] = (14, 5)
ncdata = netCDF4.Dataset('http://thredds.met.no/thredds/dodsC/arome25/arome_metcoop_default2_5km_latest.nc')
x_wind_v = ncdata.variables['x_wind_10m'] # x component wrt the senorge grid - not true East!!!
y_wind_v = ncdata.variables['y_wind_10m'] # y component wrt the senorge grid - not true North!!!
lat_v = ncdata.variables['latitude']
lon_v = ncdata.variables['longitude']
time_v = ncdata.variables['time']
t = netCDF4.num2date(time_v[:], time_v.units)
i_x = 200
i_y = 400
c_lat = lat_v[i_x,i_y]
c_lon = lon_v[i_x,i_y]
x_wind = x_wind_v[:,i_y,i_x]
y_wind = y_wind_v[:,i_y,i_x]
x_avg = np.mean(x_wind)
y_avg = np.mean(y_wind)
avg_wind_speed = np.sqrt(x_avg**2 + y_avg**2)
wind_speed = np.sqrt(x_wind**2 + y_wind**2)
wind_direction = np.arctan2(x_wind, y_wind) * 180 / np.pi
# using (x, y) results in N=0, W=-90, E=90, S=+/-180
# using (y, x) results in N=90, W=+/-180, E=0, S=-90
The wind direction is most likely affected by the downscaling of the wind-speed vectors; MET will provide a separate
wind-direction variable in the NetCDF files on THREDDS that relates to the original 2.5 km resolution.
st_threshold = 7.0 # (m/s); snow transport threshold varies depending on snow surface conditions
rel_wind_speed = np.where(wind_speed > st_threshold)
print(type(rel_wind_speed), len(rel_wind_speed), len(wind_speed))
plt.figure()
plt.plot(t, wind_speed)
plt.plot(t, x_wind, label='x-wind', color='g')
plt.plot(t, y_wind, label='y-wind', color='k')
plt.axhline(y=0, color='lightgrey')
plt.axhline(y=st_threshold, color='lightgrey')
plt.ylabel('Wind speed (m/s)')
plt.title('Wind speed at {0:.2f}E and {1:.2f}N'.format(c_lon, c_lat))
plt.show()
plt.figure()
plt.plot(t, wind_direction)
plt.axhline(y=0, color='lightgrey')
plt.ylabel('Wind direction (deg)')
plt.title('Wind direction at {0:.2f}E and {1:.2f}N'.format(c_lon, c_lat))
plt.show()
def avg_wind_dir(uav, vav):
    """Convert mean wind components (uav, vav) into a direction in degrees.

    Quadrant-aware arctangent: flows with uav > 0 map through 90 - atan,
    flows with uav < 0 through 270 - atan. Degenerate cases keep the
    original convention: calm (0, 0) -> 0, due-positive-v -> 360,
    due-negative-v -> 180.
    """
    if uav == 0:
        if vav == 0:
            return 0.0
        return 360.0 if vav > 0 else 180.0
    offset = 90.0 if uav > 0 else 270.0
    return offset - 180.0 / np.pi * np.arctan(vav / uav)
# test avg_wind_dir()
uav = np.array([1., 1., -1., -1., 0.0, 1.0, 0.0])
vav = np.array([1., -1., 1., -1., 1.0, 0.0, 0.0])
exp_res = [45.0, 135.0, 315.0, 225.0, 360.0, 90.0, 0.0]
res = [avg_wind_dir(u, v) for u, v in zip(uav, vav)]
print(res, res==exp_res)
u = np.array([-10., 10., -10.])
v = np.array([1., -1., -1.])
res = [avg_wind_dir(x, y) for x, y in zip(u, v)]
print(res)
uav = np.mean(u)
vav = np.mean(v)
avg_dir = avg_wind_dir(uav, vav)
print(uav, vav, avg_dir)
def drift_potential(u, a=1.2, b=15, c=.16):
    """Snow-drift potential: Gompertz resampling of Foehn's 8e-5 * u^3 curve.

    Uses a Gompertz function (a subclass of sigmoid functions) to resample
    the experimentally derived snow-transport curve of Foehn et al. (1980).

    Parameters
    ----------
    u : wind speed in m/s (scalar or array)
    a : asymptote -- roughly the maximum possible additional snow depth
    b : displacement along the x-axis -- a delay before snow transport
        starts; snow surface hardness will influence `b`
    c : growth rate -- how quickly snow transport increases with wind
        speed; snow surface hardness and concurrent snowfall influence `c`

    The default values of `a`, `b` and `c` give the best fit to Foehn's model.

    Returns
    -------
    tuple : (Gompertz estimate, Foehn cubic estimate) of additional loading
        by wind redistribution on leeward slopes.

    TODO: link a, b, and c to snow surface conditions available from the
    seNorge model.
    """
    foehn_estimate = 8e-5 * u**3.0
    gompertz_estimate = a * np.exp(-b * np.exp(-c * u))
    return gompertz_estimate, foehn_estimate
dummy_wind = np.arange(0,35) # m/s
dummy_hs, hs_foehn = drift_potential(dummy_wind, a=1.2, b=15, c=.16)
plt.figure()
plt.axhline(y=0.05, linestyle='--', color='g') # lower limit for little snow transport
plt.axhline(y=0.2, linestyle='--', color='y') # lower limit for intermediate snow transport
plt.axhline(y=0.5, linestyle='--', color='r') # lower limit for severe snow transport
plt.plot(dummy_wind, hs_foehn, color='0.5', label='Föhn et.al, 1980')
plt.plot(dummy_wind, dummy_hs, label='snow drift potential')
plt.ylabel('Additional snow height')
plt.legend(loc=2)
plt.show()
hs_wind, hsf = drift_potential(wind_speed)
plt.figure()
plt.plot(t, hs_wind)
plt.ylabel('Additional snow height (m)')
ax_wind = plt.gca().twinx()
ax_wind.plot(t, wind_speed, color='k')
ax_wind.set_ylabel('Wind speed (m/s)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and clean data
Step2: Find definite slave data in CLIWOC data set
Step3: Clean CLIWOC data
Step4: cliwoc_data (unclassified) = 0
Step5: Load Slave Voyages data
Step6: Clean Slave voyages data
Step7: Join data sets
Step8: Test of fuzzywuzzy method
Step9: Encode data
Step10: Extract training data, and create list of classes
Step11: left this code so we can check if there are any null values in each
Step12: Fit training data to classifier
Step15: Test classifier
Step16: try decision trees plotting
|
<ASSISTANT_TASK:>
Python Code:
classifier_algorithm = "Decision Tree"
import collections
import exploringShipLogbooks
import numpy as np
import os.path as op
import pandas as pd
import exploringShipLogbooks.wordcount as wc
from fuzzywuzzy import fuzz
from sklearn import preprocessing
from sklearn.naive_bayes import MultinomialNB
from sklearn import tree
from exploringShipLogbooks.basic_utils import clean_data
from exploringShipLogbooks.basic_utils import encode_data_df
from exploringShipLogbooks.basic_utils import extract_logbook_data
from exploringShipLogbooks.fuzz_replacement import fuzzy_wuzzy_classification
from exploringShipLogbooks.basic_utils import isolate_columns
from exploringShipLogbooks.basic_utils import isolate_training_data
from exploringShipLogbooks.config import *
# extract data from zip file
cliwoc_data = extract_logbook_data('CLIWOC15.csv')
label_encoding = preprocessing.LabelEncoder().fit(cliwoc_data['LogbookIdent']).classes_
cliwoc_data['LogbookIdent'] = preprocessing.LabelEncoder().fit_transform(cliwoc_data['LogbookIdent'])
# extract logs that mention slaves
slave_mask = wc.count_key_words(cliwoc_data, text_columns, slave_words)
print('Found ', len(slave_mask[slave_mask]), ' logs that mention slaves')
# find indices of ship names that are "non-slave" ships before dropping ship name column
non_slave_log_locations = isolate_training_data(cliwoc_data, {'ShipName': non_slave_ships})
print('Found ', len(non_slave_log_locations[non_slave_log_locations==True]), ' logs that are non-slave ships')
cliwoc_data['slave_logs'] = np.zeros(len(cliwoc_data))
slave_log_locations = cliwoc_data['LogbookIdent'].isin(list(cliwoc_data['LogbookIdent']
[slave_mask].unique()))
cliwoc_data.loc[non_slave_log_locations,'slave_logs'] = 1
cliwoc_data.loc[slave_log_locations,'slave_logs'] = 2
cliwoc_data = cliwoc_data.sort_values('LogbookIdent', ascending=True)
cliwoc_data_all = cliwoc_data.set_index('LogbookIdent', drop= False).copy()
cliwoc_data = cliwoc_data.set_index('LogbookIdent', drop = False)
cliwoc_data = cliwoc_data.drop_duplicates('LogbookIdent')
# uncomment this if looking at ship names for manual review
#desired_columns.append('ShipName')
# remove undesired columns
cliwoc_data = isolate_columns(cliwoc_data, desired_columns)
data_path = op.join(exploringShipLogbooks.__path__[0], 'data')
file_name = data_path + '/tastdb-exp-2010'
slave_voyage_logs = pd.read_pickle(file_name)
year_ind = ~(slave_voyage_logs['yeardep'].isnull())
slave_voyage_logs = slave_voyage_logs[year_ind]
cliwoc_ind = (slave_voyage_logs['yeardep']>cliwoc_data['Year'].min()) & (slave_voyage_logs['yeardep']<cliwoc_data['Year'].max())
slave_voyage_logs = slave_voyage_logs[cliwoc_ind]
slave_voyage_desired_cols = list(slave_voyage_conversions.keys())
slave_voyage_logs = isolate_columns(slave_voyage_logs, slave_voyage_desired_cols)
slave_voyage_logs.rename(columns=slave_voyage_conversions, inplace=True)
#slave_voyage_logs.columns = ['Nationality', 'ShipType', 'VoyageFrom', 'VoyageTo', 'Year']
slave_voyage_logs['slave_logs'] = 3
slave_voyage_indices = range(len(slave_voyage_logs)) + (cliwoc_data.tail(1).index[0]+1)
slave_voyage_logs = slave_voyage_logs.set_index(slave_voyage_indices)
all_data = pd.concat([cliwoc_data, slave_voyage_logs])
#all_data = cliwoc_data.append(slave_voyage_logs)
all_data = clean_data(all_data)
# cleanup
#del cliwoc_data, slave_voyage_logs
all_data.head()
all_data_test = all_data.copy()
fuzz_columns = ['Nationality', 'ShipType', 'VoyageFrom', 'VoyageTo']
for col in fuzz_columns:
all_data = fuzzy_wuzzy_classification(all_data, col)
from sklearn.preprocessing import LabelEncoder
class MultiColumnLabelEncoder:
    """Integer-encode several DataFrame columns with sklearn's LabelEncoder.

    Parameters
    ----------
    columns : list of str, optional
        Column names to encode. If None, every column is encoded.
    """

    def __init__(self, columns=None):
        self.columns = columns  # array of column names to encode

    def fit(self, X, y=None):
        # Stateless: the per-column encoders are built inside transform(),
        # so fit has nothing to do here.
        return self

    def transform(self, X):
        '''
        Transforms columns of X specified in self.columns using
        LabelEncoder(). If no columns specified, transforms all
        columns in X.
        '''
        output = X.copy()
        if self.columns is not None:
            for col in self.columns:
                # BUG FIX: original called the non-existent `is_instance`
                # (NameError at runtime); the builtin is `isinstance`.
                # Only string-valued columns are label-encoded; numeric
                # columns pass through untouched.
                if isinstance(X[col][0], str):
                    output[col] = LabelEncoder().fit_transform(output[col])
                else:
                    output[col] = X[col]
        else:
            for colname, col in output.iteritems():
                output[colname] = LabelEncoder().fit_transform(col)
        return output

    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X)
# Encode features to suit the chosen classifier. The tree path label-encodes
# string columns in place; the Naive Bayes path runs the project's
# encode_data_df helper (from exploringShipLogbooks.basic_utils), which
# appears to also emit a 'nan' indicator frame -- TODO confirm against helper.
if classifier_algorithm == "Decision Tree":
    all_data = MultiColumnLabelEncoder().fit_transform(all_data)
elif classifier_algorithm == "Naive Bayes":
    all_data = encode_data_df(all_data)
    # Collapse the per-column NaN indicators into a single 0/1 'no_data' flag,
    # then drop the raw indicator columns.
    all_data['no_data'] = all_data['nan'].apply(lambda x: x.any(), axis=1).astype(int)
    all_data = all_data.drop('nan', axis=1)
else:
    raise KeyError("Please enter a valid classification type (Decision Trees or Naive Bayes)")
unclassified_logs = all_data[all_data['slave_logs']==0]
#unclassified_logs = unclassified_logs.drop('slave_logs', axis=1)
validation_set_1 = all_data[all_data['slave_logs']==2]
#validation_set_1 = validation_set_1.drop('slave_logs', axis=1)
# reserve first 20% of slave_voyage_logs as validation set
validation_set_2_indices = range(slave_voyage_indices.min(),
slave_voyage_indices.min() + round(len(slave_voyage_indices)*.2))
validation_set_2 = all_data.iloc[validation_set_2_indices]
#validation_set_2 = validation_set_2.drop('slave_logs', axis=1)
training_logs_pos = all_data.drop(validation_set_2_indices)
training_logs_pos = training_logs_pos[training_logs_pos['slave_logs']==3]
#training_logs_pos = training_logs_pos.drop('slave_logs', axis=1)
# note! This relies on cliwoc data being first in all_data
# could make more robust later
training_logs_neg = all_data[all_data['slave_logs']==1]
#training_logs_neg = training_logs_neg.drop('slave_logs', axis=1)
# cleanup
#del all_data
def finding_null_values(df):
    """Return per-column null counts, restricted to columns that have nulls."""
    null_counts = df.isnull().sum()
    return null_counts[null_counts > 0]
# Balance the training classes: the negative (non-slave) logs are far fewer
# than the positives, so each negative row is repeated `repeat_multiplier`
# times to roughly match the positive count.
repeat_multiplier = round(len(training_logs_pos)/len(training_logs_neg))

# create list of classes for training data (0 is for non-slave, 1 is for slave)
# index matches training_data
classes = np.zeros(len(training_logs_neg)).repeat(repeat_multiplier)
#classes = np.append(classes, np.ones(len(training_logs_pos)))
classes = np.append(classes, np.ones(len(training_logs_pos)))

# join training data: oversampled negatives first, then positives, in the
# same order as `classes` above.
neg_rep = pd.concat([training_logs_neg]*repeat_multiplier)
training_data = pd.concat([neg_rep, training_logs_pos], ignore_index = True)

# convert to numpy array, dropping the 'slave_logs' bookkeeping column
columns = list(training_data.columns)
columns.remove('slave_logs')
training_data = training_data.as_matrix(columns)
# Instantiate and fit the classifier named by `classifier_algorithm`.
# BUG FIX: the two branches were swapped -- the "Decision Tree" branch built
# a MultinomialNB and the "Naive Bayes" branch built a DecisionTreeClassifier.
if classifier_algorithm == "Decision Tree":
    classifier = tree.DecisionTreeClassifier()
    # Train on all but the first matrix column, matching the prediction slice.
    classifier.fit(training_data[::, 1::], classes)
elif classifier_algorithm == "Naive Bayes":
    classifier = MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
    classifier.fit(training_data[::, 1::], classes)
else:
    raise KeyError("Please enter a valid classification type (Decision Trees or Naive Bayes)")
def validation_test(classifier, validation_set, expected_class):
    """Print how often a fitted classifier labels a validation set as expected.

    BUG FIX: the original docstring had lost its triple quotes, leaving two
    bare text lines at the top of the body (a SyntaxError).

    Parameters
    ----------
    classifier : fitted sklearn estimator
    validation_set : pandas.DataFrame
        Must contain a 'slave_logs' column, which is dropped before predicting.
    expected_class : int
        The class (1 or 0) every row of this validation set should receive.

    Prints the successful classification rate.
    """
    columns = list(validation_set.columns)
    columns.remove('slave_logs')
    validation_set = validation_set.as_matrix(columns)
    # Skip the first matrix column, matching the training slice.
    predictions = classifier.predict(validation_set[::, 1::])
    counts = collections.Counter(predictions)
    # NOTE(review): under Python 2 this is integer division; confirm the
    # notebook runs on Python 3 or the percentage truncates to 0 or 100.
    percent_correct = (counts[expected_class] / (len(predictions)) * 100)
    print('Validation set was classified as', expected_class,
          round(percent_correct, 2), '% of the time')
def predict_class(classifier, data_subset):
    """Predict the class of each row and record it in the returned frame.

    BUG FIX: the original docstring had lost its triple quotes, leaving two
    bare text lines at the top of the body (a SyntaxError).

    Parameters
    ----------
    classifier : fitted sklearn estimator
    data_subset : pandas.DataFrame
        Must contain a 'slave_logs' column; it is overwritten with the
        prediction offset by 4, marking rows as machine-classified.

    Returns
    -------
    pandas.DataFrame
        Copy of `data_subset` with the 'slave_logs' column revalued.
    """
    try:
        # drop old predictions before reclassifying (if they exist)
        data_subset = data_subset.drop('predictions', axis=1)
        data_to_classify = data_subset.copy()
    except (KeyError, ValueError):
        # No stale 'predictions' column -- classify the frame as-is.
        # (Was a bare `except:`; narrowed to the errors drop() can raise.)
        data_to_classify = data_subset.copy()

    # convert to numpy and classify
    columns = list(data_to_classify.columns)
    columns.remove('slave_logs')
    data_matrix = data_to_classify.as_matrix(columns)
    predictions = classifier.predict(data_matrix[::, 1::])

    # revalue slave_log ID column to indicate classification
    data_to_classify['slave_logs'] = predictions + 4

    # print statistics
    counts = collections.Counter(predictions)
    for key in counts:
        percent = (counts[key] / (len(predictions)) * 100)
        print(round(percent, 2), 'of data was classified as ', key)

    # update slave_log columns
    return data_to_classify
print('Testing validation data from slave logs data set')
validation_test(classifier, validation_set_2, 1)
print('Testing validation data from cliwoc data set:')
validation_test(classifier, validation_set_1, 1)
unclassified_logs = predict_class(classifier, unclassified_logs)
unclassified_logs.head()
# export PDF with decision tree
from sklearn.externals.six import StringIO
import os
import pydot

dot_data = StringIO()
# BUG FIX: `new_classifier` is never defined anywhere in this notebook;
# the fitted estimator is named `classifier`.
tree.export_graphviz(classifier, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("test.pdf")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's get the data.
Step2: Let's find the best params set for some different models
Step3: - Linear Predictor
Step4: - Random Forest model
|
<ASSISTANT_TASK:>
Python Code:
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import predictor.feature_extraction as fe
import utils.preprocessing as pp
import utils.misc as misc
AHEAD_DAYS = 14
datasets_params_list_df = pd.read_pickle('../../data/datasets_params_list_df.pkl')
print(datasets_params_list_df.shape)
datasets_params_list_df.head()
train_days_arr = 252 * np.array([1, 2, 3])
params_list_df = pd.DataFrame()
for train_days in train_days_arr:
temp_df = datasets_params_list_df[datasets_params_list_df['ahead_days'] == AHEAD_DAYS].copy()
temp_df['train_days'] = train_days
params_list_df = params_list_df.append(temp_df, ignore_index=True)
print(params_list_df.shape)
params_list_df.head()
from predictor.dummy_mean_predictor import DummyPredictor
PREDICTOR_NAME = 'dummy'
# Global variables
eval_predictor = DummyPredictor()
step_eval_days = 60 # The step to move between training/validation pairs
params = {'eval_predictor': eval_predictor, 'step_eval_days': step_eval_days}
results_df = misc.parallelize_dataframe(params_list_df, misc.apply_mean_score_eval, params)
results_df['r2'] = results_df.apply(lambda x: x['scores'][0], axis=1)
results_df['mre'] = results_df.apply(lambda x: x['scores'][1], axis=1)
# Pickle that!
results_df.to_pickle('../../data/results_ahead{}_{}_df.pkl'.format(AHEAD_DAYS, PREDICTOR_NAME))
results_df['mre'].plot()
print('Minimum MRE param set: \n {}'.format(results_df.iloc[np.argmin(results_df['mre'])]))
print('Maximum R^2 param set: \n {}'.format(results_df.iloc[np.argmax(results_df['r2'])]))
from predictor.linear_predictor import LinearPredictor
PREDICTOR_NAME = 'linear'
# Global variables
eval_predictor = LinearPredictor()
step_eval_days = 60 # The step to move between training/validation pairs
params = {'eval_predictor': eval_predictor, 'step_eval_days': step_eval_days}
results_df = misc.parallelize_dataframe(params_list_df, misc.apply_mean_score_eval, params)
results_df['r2'] = results_df.apply(lambda x: x['scores'][0], axis=1)
results_df['mre'] = results_df.apply(lambda x: x['scores'][1], axis=1)
# Pickle that!
results_df.to_pickle('../../data/results_ahead{}_{}_df.pkl'.format(AHEAD_DAYS, PREDICTOR_NAME))
results_df['mre'].plot()
print('Minimum MRE param set: \n {}'.format(results_df.iloc[np.argmin(results_df['mre'])]))
print('Maximum R^2 param set: \n {}'.format(results_df.iloc[np.argmax(results_df['r2'])]))
from predictor.random_forest_predictor import RandomForestPredictor
PREDICTOR_NAME = 'random_forest'
# Global variables
eval_predictor = RandomForestPredictor()
step_eval_days = 60 # The step to move between training/validation pairs
params = {'eval_predictor': eval_predictor, 'step_eval_days': step_eval_days}
results_df = misc.parallelize_dataframe(params_list_df, misc.apply_mean_score_eval, params)
results_df['r2'] = results_df.apply(lambda x: x['scores'][0], axis=1)
results_df['mre'] = results_df.apply(lambda x: x['scores'][1], axis=1)
# Pickle that!
results_df.to_pickle('../../data/results_ahead{}_{}_df.pkl'.format(AHEAD_DAYS, PREDICTOR_NAME))
results_df['mre'].plot()
print('Minimum MRE param set: \n {}'.format(results_df.iloc[np.argmin(results_df['mre'])]))
print('Maximum R^2 param set: \n {}'.format(results_df.iloc[np.argmax(results_df['r2'])]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Links
|
<ASSISTANT_TASK:>
Python Code:
from metakernel import register_ipython_magics
register_ipython_magics()
%%processing
// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com
// Game-of-Life grid: owns the cell board and applies Conway's rules.
class GOL {

  int w = 8;           // cell size in pixels
  int columns, rows;   // grid dimensions, in cells

  // Game of life board: 1 = alive, 0 = dead
  int[][] board;

  GOL() {
    // Initialize rows, columns and set-up arrays
    columns = width/w;
    rows = height/w;
    board = new int[columns][rows];
    //next = new int[columns][rows];
    // Call function to fill array with random values 0 or 1
    init();
  }

  // Randomize every interior cell; the one-cell border stays dead so the
  // 3x3 neighbor scan in generate() never indexes outside the array.
  void init() {
    for (int i = 1; i < columns-1; i++) {
      for (int j = 1; j < rows-1; j++) {
        board[i][j] = int(random(2));
      }
    }
  }

  // The process of creating the new generation
  void generate() {
    int[][] next = new int[columns][rows];

    // Loop through every spot in our 2D array and check spots neighbors
    for (int x = 1; x < columns-1; x++) {
      for (int y = 1; y < rows-1; y++) {

        // Add up all the states in a 3x3 surrounding grid
        int neighbors = 0;
        for (int i = -1; i <= 1; i++) {
          for (int j = -1; j <= 1; j++) {
            neighbors += board[x+i][y+j];
          }
        }

        // A little trick to subtract the current cell's state since
        // we added it in the above loop
        neighbors -= board[x][y];

        // Rules of Life
        // BUG FIX: these two thresholds were written as bare Python-style
        // assignments (no type, no semicolon), which does not compile in
        // Processing/Java. Declared as ints.
        int max_neighbors = 3;
        int min_neighbors = 2;
        if      ((board[x][y] == 1) && (neighbors < min_neighbors)) next[x][y] = 0; // Loneliness
        else if ((board[x][y] == 1) && (neighbors > max_neighbors)) next[x][y] = 0; // Overpopulation
        else if ((board[x][y] == 0) && (neighbors == 3))            next[x][y] = 1; // Reproduction
        else                                                        next[x][y] = board[x][y]; // Stasis
      }
    }
    // Next is now our board
    board = next;
  }

  // This is the easy part, just draw the cells, fill 255 for '1', fill 0 for '0'
  void display() {
    for (int i = 0; i < columns; i++) {
      for (int j = 0; j < rows; j++) {
        if ((board[i][j] == 1)) fill(0);
        else fill(255);
        stroke(0);
        rect(i*w, j*w, w, w);
      }
    }
  }
}
// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com

// A basic implementation of John Conway's Game of Life CA
// how could this be improved to use object oriented programming?
// think of it as similar to our particle system, with a "cell" class
// to describe each individual cell and a "cellular automata" class
// to describe a collection of cells

// The single grid object; constructed once in setup().
GOL gol;

void setup() {
  size(640, 320);
  frameRate(24);     // slow the sketch down so generations are visible
  gol = new GOL();
}

void draw() {
  background(255);
  gol.generate();    // advance one generation per frame
  gol.display();
}

// reset board when mouse is pressed
void mousePressed() {
  gol.init();
}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate the likelhood profiles
Step2: Now that we have a fitted SED, we can generate a plot from the results. A simple way to accomplish this is by using the show_spectrum.py example script.
|
<ASSISTANT_TASK:>
Python Code:
# Load the modules
import gammalib
import ctools
import cscripts
# Define the ctools install directory
import os
ct_dir = os.environ['CTOOLS']
os.environ['CALDB'] = ct_dir + '/share/caldb/'
# Configure some preliminary variables
inmodel = ct_dir + '/share/models/crab.xml'
caldb = 'prod2'
irf = 'North_5h'
emin = 0.1
emax = 100
# Simulate some data
sim = ctools.ctobssim()
sim['inmodel'] = inmodel
sim['caldb'] = caldb
sim['irf'] = irf
sim['edisp'] = False
sim['outevents'] = 'outfile.fits'
sim['prefix'] = 'sim_events_'
sim['ra'] = 83.63
sim['dec'] = 22.151
sim['rad'] = 5
sim['tmin'] = 0
sim['tmax'] = 7200
sim['emin'] = emin
sim['emax'] = emax
sim['deadc'] = 0.98
# Run the simulation (prevents writing to disk)
sim.run()
# Fit the data
fitter = ctools.ctlike(sim.obs())
fitter['edisp'] = False
fitter.run()
# Configure csspec
sed = cscripts.csspec(fitter.obs())
sed['outfile'] = 'sed_likelihood_profile.fits'
sed['caldb'] = caldb
sed['irf'] = irf
sed['srcname'] = 'Crab'
sed['emin'] = emin
sed['emax'] = emax
sed['enumbins'] = 10
sed['debug'] = True
# Parameters that control the likelihood profile generation
sed['dll_sigstep'] = 1
sed['dll_sigmax'] = 5
# Execute csspec & save results to file
sed.execute()
# Add the examples script directory to the path
import sys
sys.path.append(ct_dir + '/share/examples/python/')
# Plot the SED and likelihood profile
import show_spectrum as show_spectrum
show_spectrum.plot_spectrum(sed['outfile'].filename(), '')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here's a sqlite database for you to store the data once it's ready
Step2: =>YOUR TURN!
Step3: Exploring the Tabular Data
Step4: What do you see?
Step5: What can we infer from the summary statistics?
Step6: =>YOUR TURN!
Step7: Visually Exploring the Data
Step8: Now let's look at a histogram of the fares.
Step9: Dealing with Missing Values
Step10: =>YOUR TURN!
Step11: =>YOUR TURN!
Step12: Save Your Work
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas.io.sql as pd_sql
import sqlite3 as sql
%matplotlib inline
con = sql.connect("titanic.db")
# Use pandas to open the csv.
# You'll have to put in the filepath
# It should look something like "../titanic/data/train.csv"
df =
# Use pandas to view the first 10 rows.
# Use pandas to get the summary statistics.
# Use pandas to get the median age.
# Use pandas to count the number of unique Ticket values.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(df['Age'], bins = 10, range = (df['Age'].min(),df['Age'].max()))
plt.title('Age distribution')
plt.xlabel('Age')
plt.ylabel('Count of Passengers')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(df['Fare'], bins = 10, range = (df['Fare'].min(),df['Fare'].max()))
plt.title('Fare distribution')
plt.xlabel('Fare')
plt.ylabel('Count of Passengers')
plt.show()
# Use pandas to sum the null Cabin values.
# Use pandas to drop the Ticket column.
# Use pandas to get the mean Age.
# Use pandas to fill in the null Age values with the mean.
# Use pandas to save your dataframe to a sqlite database.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k labelled examples and the test set about 19,000. Given these sizes, it should be possible to train models quickly on any machine.
Step3: Extract the dataset from the compressed .tar.gz file.
Step4: Problem 1
Step5: Now let's load the data in a more manageable format.
Step6: Problem 2
Step7: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
Step8: Problem 3
Step9: Problem 4
Step10: Prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed.
Step11: Finally, let's save the data for later reuse
Step13: Problem 5
Step14: Problem 6
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import os
import tarfile
import urllib
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
import cPickle as pickle
url = 'http://yaroslavvb.com/upload/notMNIST/'
def maybe_download(filename, expected_bytes):
Download a file if not present, and make sure it's the right size.
if not os.path.exists(filename):
filename, _ = urllib.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print 'Found and verified', filename
else:
raise Exception(
'Failed to verify' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
def extract(filename):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if not os.path.exists(root):
tar = tarfile.open(filename)
tar.extractall()
tar.close()
data_folders = [os.path.join(root, d) for d in sorted(os.listdir(root))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print data_folders
return data_folders
train_folders = extract(train_filename)
test_folders = extract(test_filename)
display(Image("notMNIST_small/A/MDEtMDEtMDAudHRm.png"), Image("notMNIST_small/D/MDEtMDEtMDAudHRm.png"))
display(Image("notMNIST_small/D/MDEtMDEtMDAudHRm.png"))
display(Image("notMNIST_small/C/MDEtMDEtMDAudHRm.png"))
display(Image("notMNIST_small/B/MDEtMDEtMDAudHRm.png"))
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load(data_folders, min_num_images, max_num_images):
dataset = np.ndarray(
shape=(max_num_images, image_size, image_size), dtype=np.float32)
labels = np.ndarray(shape=(max_num_images), dtype=np.int32)
label_index = 0
image_index = 0
for folder in data_folders:
print folder
for image in os.listdir(folder):
if image_index >= max_num_images:
raise Exception('More images than expected: %d >= %d' % (
num_images, max_num_images))
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[image_index, :, :] = image_data
labels[image_index] = label_index
image_index += 1
except IOError as e:
print 'Could not read:', image_file, ':', e, '- it\'s ok, skipping.'
label_index += 1
num_images = image_index
dataset = dataset[0:num_images, :, :]
labels = labels[0:num_images]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' % (
num_images, min_num_images))
print 'Full dataset tensor:', dataset.shape
print 'Mean:', np.mean(dataset)
print 'Standard deviation:', np.std(dataset)
print 'Labels:', labels.shape
return dataset, labels
train_dataset, train_labels = load(train_folders, 450000, 550000)
test_dataset, test_labels = load(test_folders, 18000, 20000)
plt.imshow(train_dataset[np.random.randint(train_dataset.shape[0])])
np.random.seed(133)
def randomize(dataset, labels):
    """Shuffle `dataset` and `labels` together with one shared permutation."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
test_dataset, test_labels = randomize(test_dataset, test_labels)
#plt.imshow(train_dataset[np.random.randint(train_dataset.shape[0])])
plt.pcolor(train_dataset[np.random.randint(train_dataset.shape[0])], cmap="Greys")
# Verify the classes are roughly balanced in both splits (labels 0..9 map
# to letters A..J).
fig, ax = plt.subplots(1,2)
bins = np.arange(train_labels.min(), train_labels.max()+2)
ax[0].hist(train_labels, bins=bins)
ax[0].set_xticks((bins[:-1]+bins[1:])/2, [chr(k) for k in range(ord("A"), ord("J")+1)])
ax[0].set_title("Training data")
bins = np.arange(test_labels.min(), test_labels.max()+2)
ax[1].hist(test_labels, bins=bins)
ax[1].set_xticks((bins[:-1]+bins[1:])/2, [chr(k) for k in range(ord("A"), ord("J")+1)])
ax[1].set_title("Test data")
# Scratch checks: the letter names and the bin centers/label range used above.
[chr(k) for k in range(ord("A"), ord("J")+1)]
print (bins[:-1]+bins[1:])/2
print train_labels.min(), train_labels.max()
# Carve a validation split off the front of the (already shuffled) training
# set, then keep the next train_size examples for training.
train_size = 200000
valid_size = 10000
valid_dataset = train_dataset[:valid_size,:,:]
valid_labels = train_labels[:valid_size]
train_dataset = train_dataset[valid_size:valid_size+train_size,:,:]
train_labels = train_labels[valid_size:valid_size+train_size]
print 'Training', train_dataset.shape, train_labels.shape
print 'Validation', valid_dataset.shape, valid_labels.shape
# Persist all three splits in one pickle so later notebooks can skip the
# slow image-loading step.
pickle_file = 'notMNIST.pickle'
try:
  f = open(pickle_file, 'wb')
  save = {
    'train_dataset': train_dataset,
    'train_labels': train_labels,
    'valid_dataset': valid_dataset,
    'valid_labels': valid_labels,
    'test_dataset': test_dataset,
    'test_labels': test_labels,
    }
  # HIGHEST_PROTOCOL keeps the file compact for these large arrays.
  pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
  f.close()
except Exception as e:
  print 'Unable to save data to', pickle_file, ':', e
  raise
statinfo = os.stat(pickle_file)
print 'Compressed pickle size:', statinfo.st_size
def find_duplicates(d1, l1, d2, l2):
  """Return the percentage of pixel-identical images shared by two datasets.

  FIX: the parameter notes below lost their triple-quote delimiters in the
  original (bare `d1: ...` lines were a syntax error); restored as a docstring.

  Args:
    d1, l1: images and labels of the smaller dataset.
    d2, l2: images and labels of the larger dataset.

  Returns:
    Number of images of d1 found exactly in d2, as a percentage of the
    combined item count of both datasets.
  """
  print(d1.shape, d2.shape)
  total_items = d1.shape[0] + d2.shape[0]
  overlap_items = 0
  # Only images sharing a label can count as duplicates, so compare per
  # class.  (Generalized from the original hard-coded range(10): iterating
  # the labels actually present gives identical results for 0..9 labels.)
  for l in np.union1d(l1, l2):
    idx1 = (l1 == l)
    idx2 = (l2 == l)
    m1 = d1[idx1]
    m2 = d2[idx2]
    for it1 in m1:
      # Exact pixel-wise match: the sum of absolute differences is zero.
      overlap_items += np.sum(np.sum(np.abs(m2 - it1), axis=(-1, -2)) == 0)
  return overlap_items * 100. / total_items
print "Overlap between Validation and Test : ", find_duplicates(valid_dataset, valid_labels, test_dataset, test_labels)
np.sum(np.random.randn(3,4,5), axis=(-1,-2,-3))
#print "Overlap between Validation and Train : ", find_duplicates(valid_dataset, valid_labels, train_dataset, train_labels)
#print "Overlap between Test and Train : ", find_duplicates(test_dataset, test_labels, train_dataset, train_labels)
n_train = -1
X_train, y_train = train_dataset[:n_train].reshape(-1, train_dataset.shape[1]*train_dataset.shape[2]), train_labels[:n_train]
model = LogisticRegression(multi_class="multinomial", solver="lbfgs")
model.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, classification_report
print "Predictions"
labels = [chr(k) for k in range(ord("A"), ord("J")+1)]
n_val = 10000
X_val, y_val = valid_dataset[:n_val].reshape(-1, valid_dataset.shape[1]*train_dataset.shape[2]), valid_labels[:n_val]
print X_val.shape, y_val.shape
y_pred = model.predict(X_val)
print "Confusion matrix: "
plt.pcolor(confusion_matrix(y_pred, y_val), cmap="Blues")
print "Score: ", classification_report(y_pred, y_val, target_names=labels)
# Show a few random validation examples with their true vs. predicted labels.
n_vis = 10
n_cols = 5
n_rows = n_vis/ n_cols
idx = np.random.randint(valid_dataset.shape[0], size=n_vis)
X_vis, y_vis = valid_dataset[idx].reshape(-1, valid_dataset.shape[1]*valid_dataset.shape[2]), valid_labels[idx]
y_pred = model.predict(X_vis)
fig, ax = plt.subplots(n_rows, n_cols, sharex=True, sharey=True, figsize=(n_rows, n_cols))
fig.set_size_inches(10*n_rows, 5*n_cols)
for i, axi in enumerate(ax.flatten()):
  axi.pcolor(X_vis[i].reshape(valid_dataset.shape[1], valid_dataset.shape[2]), cmap="Blues")
  axi.set_title("True: %s, Predicted: %s" % (labels[y_vis[i]], labels[y_pred[i]]))
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.