12,500
<ASSISTANT_TASK:>
Python Code:
!pip install dicttoxml
import json
from dicttoxml import dicttoxml
PETS = {}
PET_STATUS_INDEX = {}
TAG_INDEX = {}
ORDERS = {}
ORDER_STATUS_INDEX = {}
JSON = 'application/json'
XML = 'application/xml'
content_type = JSON
class MissingField(Exception):
def __init__(self, type_name, field):
self.msg = '{} is missing required field "{}"'.format(type_name, field)
class InvalidValue(Exception):
def __init__(self, name, type_name):
self.msg = '{} is not a {}'.format(name, type_name)
class NotFound(Exception):
def __init__(self, type_name, id):
self.msg = 'There is no {} with id {}'.format(type_name, id)
def print_response(content, content_type=JSON):
if content_type == JSON:
print(json.dumps(content))
elif content_type == XML:
print(dicttoxml(content).decode('UTF-8'))
def split_query_param(param):
values = []
for paramValue in param:
values += paramValue.split(',')
values = map(lambda x: x.strip(), values)
return list(values)
def create_error_response(code, error_type, message):
return {
'code' : code,
'type' : error_type,
'message' : message
}
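# Illustrative usage of the helpers above (not part of the original notebook):
# split_query_param flattens repeated query params and comma-separated lists,
print(split_query_param(['available , pending', 'sold'])) # ['available', 'pending', 'sold']
# and the same payload can be rendered as either JSON or XML via print_response.
print_response(create_error_response(404, 'Not Found', 'example'), XML)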
# Pet APIs
def validate_pet(pet):
fields = ['id', 'category', 'name', 'photoUrls', 'tags', 'status']
for field in fields:
if field not in pet:
raise MissingField('Pet', field)
def persist_pet(pet):
validate_pet(pet)
PETS[pet['id']] = pet
index_pet(pet)
return pet
def get_pet_by_id(pet_id):
try:
pet_id = int(pet_id)
if not pet_id in PETS:
raise NotFound('Pet', pet_id)
else:
return PETS[pet_id]
except ValueError:
raise InvalidValue('Pet id', 'int')
def delete_pet_by_id(pet_id):
try:
pet_id = int(pet_id)
if not pet_id in PETS:
raise NotFound('Pet', pet_id)
else:
pet = PETS[pet_id]
del PETS[pet_id]
return pet
except ValueError:
raise InvalidValue('Pet id', 'int')
def index_pet(pet):
# Index the status of the pet
pet_status = pet['status']
if pet_status not in PET_STATUS_INDEX:
PET_STATUS_INDEX[pet_status] = set()
PET_STATUS_INDEX[pet_status].add(pet['id'])
# index the tags of the pet
for tag in pet['tags']:
tag = tag.strip()
        if tag not in TAG_INDEX:
TAG_INDEX[tag] = set()
TAG_INDEX[tag].add(pet['id'])
def collect_pets_by_id(petIds):
    petList = []
    for petId in set(petIds):
        # Indexes are not cleaned up on delete, so skip ids of pets that no longer exist
        if petId in PETS:
            petList.append(PETS[petId])
    return petList
# Order APIs
def validate_order(order):
fields = ['id', 'petId', 'quantity', 'shipDate', 'status', 'complete']
for field in fields:
if field not in order:
raise MissingField('Order', field)
def persist_order(order):
    validate_order(order)
    ORDERS[order['id']] = order
    index_order(order)
    return order
def index_order(order):
    # Index the status of the order so that GET /store/inventory can count orders by status
    order_status = order['status']
    if order_status not in ORDER_STATUS_INDEX:
        ORDER_STATUS_INDEX[order_status] = set()
    ORDER_STATUS_INDEX[order_status].add(order['id'])
def get_order_by_id(order_id):
try:
order_id = int(order_id)
if not order_id in ORDERS:
raise NotFound('Order', order_id)
else:
return ORDERS[order_id]
except ValueError:
raise InvalidValue('Order id', 'int')
def delete_order_by_id(order_id):
try:
order_id = int(order_id)
if not order_id in ORDERS:
raise NotFound('Order', order_id)
else:
order = ORDERS[order_id]
del ORDERS[order_id]
return order
except ValueError:
raise InvalidValue('Order id', 'int')
REQUEST = json.dumps({
'body' : {
'id': 1,
'category' : {
'id' : 1,
'name' : 'cat'
},
'name': 'fluffy',
'photoUrls': [],
'tags': ['cat', 'siamese'],
'status': 'available'
}
})
# POST /pet
try:
req = json.loads(REQUEST)
pet = req['body']
persist_pet(pet)
response = pet
except MissingField as e:
response = create_error_response(405, 'Invalid Pet', e.msg)
except ValueError as e:
response = create_error_response(405, 'Invalid Pet', 'Could not parse json')
finally:
print_response(response, content_type)
REQUEST = json.dumps({
'body' : {
'id': 1,
'category' : {
'id' : 1,
'name' : 'cat'
},
'name': 'fluffy',
'photoUrls': [],
'tags': ['cat', 'siamese'],
'status': 'available'
}
})
# PUT /pet
try:
req = json.loads(REQUEST)
new_pet = req['body']
current_pet = get_pet_by_id(new_pet['id'])
persist_pet(new_pet)
response = new_pet
except InvalidValue as e:
response = create_error_response(400, 'Invalid ID', e.msg)
except ValueError as e:
response = create_error_response(400, 'Invalid Pet', 'Could not parse json')
except NotFound as e:
response = create_error_response(404, 'Not Found', e.msg)
except MissingField as e:
response = create_error_response(405, 'Invalid Pet', e.msg)
finally:
print_response(response, content_type)
REQUEST = json.dumps({
'args' : {
'status' : ['available , unavailable']
}
})
# GET /pet/findByStatus
req = json.loads(REQUEST)
status_list = split_query_param(req['args']['status'])
pet_ids = []
for status in status_list:
if status in PET_STATUS_INDEX:
pet_ids += PET_STATUS_INDEX[status]
pet_list = collect_pets_by_id(pet_ids)
print_response(pet_list, content_type)
REQUEST = json.dumps({
'args' : {
'tags' : ['cat , dog, horse']
}
})
# GET /pet/findByTags
req = json.loads(REQUEST)
tag_list = split_query_param(req['args']['tags'])
pet_ids = []
for tag in tag_list:
if tag in TAG_INDEX:
pet_ids += TAG_INDEX[tag]
pet_list = collect_pets_by_id(pet_ids)
print_response(pet_list, content_type)
REQUEST = json.dumps({
'path' : {
'petId' : 1
}
})
# GET /pet/:petId
try:
req = json.loads(REQUEST)
pet_id = req['path']['petId']
response = get_pet_by_id(pet_id)
except InvalidValue as e:
response = create_error_response(400, 'Invalid ID', e.msg)
except NotFound as e:
response = create_error_response(404, 'Not Found', e.msg)
finally:
print_response(response, content_type)
REQUEST = json.dumps({
'path' : {
'petId' : 1
},
'body' : {
'name' : ['new name']
}
})
# POST /pet/:petId
try:
req = json.loads(REQUEST)
pet_updates = req['body']
pet_id = req['path']['petId']
old_pet = get_pet_by_id(pet_id)
props = ['name', 'status']
for prop in props:
if prop in pet_updates:
old_pet[prop] = pet_updates[prop][0]
response = persist_pet(old_pet)
except InvalidValue as e:
response = create_error_response(400, 'Invalid ID', e.msg)
except NotFound as e:
response = create_error_response(404, 'Not Found', e.msg)
finally:
print_response(response, content_type)
REQUEST = json.dumps({
'path' : {
'petId' : '1'
}
})
# DELETE /pet/:petId
try:
req = json.loads(REQUEST)
pet_id = req['path']['petId']
response = delete_pet_by_id(pet_id)
except InvalidValue as e:
response = create_error_response(400, 'Invalid ID', e.msg)
except NotFound as e:
response = create_error_response(404, 'Not Found', e.msg)
finally:
print_response(response, content_type)
# GET /store/inventory
status_counts = {}
for status in ORDER_STATUS_INDEX:
status_counts[status] = len(set(ORDER_STATUS_INDEX[status]))
print_response(status_counts, content_type)
REQUEST = json.dumps({
'body' : {
'id' : 1,
'petId' : 1,
'quantity' : 1,
'shipDate' : '12/30/2015',
'status' : 'placed',
'complete' : False
}
})
# POST /store/order
try:
req = json.loads(REQUEST)
order = req['body']
persist_order(order)
response = order
except MissingField as e:
response = create_error_response(400, 'Invalid Order', e.msg)
except ValueError as e:
response = create_error_response(400, 'Invalid Order', 'Could not parse json')
finally:
print_response(response, content_type)
REQUEST = json.dumps({
'path' : {
'orderId' : 1
}
})
# GET /store/order/:orderId
try:
req = json.loads(REQUEST)
order_id = req['path']['orderId']
response = get_order_by_id(order_id)
except InvalidValue as e:
response = create_error_response(400, 'Invalid ID', e.msg)
except NotFound as e:
response = create_error_response(404, 'Not Found', e.msg)
finally:
print_response(response, content_type)
REQUEST = json.dumps({
'path' : {
'orderId' : 1
}
})
# DELETE /store/order/:orderId
try:
req = json.loads(REQUEST)
order_id = req['path']['orderId']
response = delete_order_by_id(order_id)
except InvalidValue as e:
response = create_error_response(400, 'Invalid ID', e.msg)
except NotFound as e:
response = create_error_response(404, 'Not Found', e.msg)
finally:
print_response(response, content_type)
PETS = {}
PET_STATUS_INDEX = {}
TAG_INDEX = {}
ORDERS = {}
ORDER_STATUS_INDEX = {}
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: POST /pet
Step2: PUT /pet
Step3: GET /pet/findByStatus
Step4: GET /pet/findByTags
Step5: GET /pet/:petId
Step6: POST /pet/:petId
Step7: DELETE /pet/:petId
Step8: Store Endpoints
Step9: POST /store/order
Step10: GET /store/order/:orderId
Step11: DELETE /store/order/:orderId
Step12: Initialization
12,501
<ASSISTANT_TASK:>
Python Code:
# Imports
import os
import sys
import pandas as pd
import seaborn as sb
# Custom Imports
sys.path.insert(0, '../../')
import stats_toolbox as st
from stats_toolbox.utils.data_loaders import load_fem_preg_2002
# Graphics setup
%pylab inline --no-import-all
sb.set_context('notebook', font_scale=1.5)
# Load and Clean Data
df = load_fem_preg_2002('../data')
full_term = df[df['prglngth'] >= 37]
weights = df.birthwgt_kg.dropna()
H = st.Hist(df.totalwgt_lb, label='Birth Weight')
H.freq(8)
H[8]
H.plot(title='My First Histogram', xlabel='Weight (lb)', legend=True)
H.plot(mpl_hist=True, bins=30, histtype='stepfilled', alpha=0.7,
       title='My Second Histogram', xlabel='Weight (lb)', legend=True)
hist1 = st.Hist(np.repeat(np.arange(0,20,2), np.arange(20,0,-2)), label='Hist 1')
hist2 = st.Hist(np.repeat(np.arange(10,30,2), np.arange(20,0,-2)), label='Hist 2')
f = st.multiplot([hist1, hist2], title='My Title', ylim=(0,22))
f = st.multiplot([hist1, hist2], plt_kwds={'mpl_hist': True, 'bins': 10},
title='My Title', ylim=(0,22))
# Find the n largest/smallest values and their frequencies
top20 = H.largest(20)
bottom5 = H.smallest(5)
# Find the most/least frequent values
top5feq = H.most_freq(5)
bottom5freq = H.least_freq(5)
# Calculate stats
print(H.mean(), H.var(), H.std())
hist1 = st.Hist(np.repeat(np.arange(0,20,1), np.arange(20,0,-1)))
# Increment or multiply, changes are made inplace
hist1.incr(4, 5)
hist1.mult(10, 2.5)
hist1.plot()
# Histogram Arithmetic
hist1 = st.Hist(np.repeat(np.arange(0,20,1), np.arange(20,0,-1)))
hist2 = st.Hist(np.repeat(np.arange(10,30,1), np.arange(20,0,-1)))
hist3 = hist1 + hist2
hist3.plot()
(hist1 - hist2).plot(ylim=(-12, 22))
H_first = st.Hist(full_term.prglngth[full_term.birthord == 1], label='1st born')
H_other = st.Hist(full_term.prglngth[full_term.birthord != 1], label='other')
(H_first - H_other).plot(title = 'Differences in Pregnancy length between first and other babies')
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Constructing Histograms
Step3: Getting frequencies for values
Step4: or equivalently
Step5: Visualising
Step6: By default the plotting method used is pyplot.bar, which plots each data point's frequency.
Step7: Multiple plots are possible with the multiplot command, which plots multiple objects on a single axes
Step8: specific plotting parameters can be passed via the plt_kwds argument
Step9: Methods
Step10: Manipulation
12,502
<ASSISTANT_TASK:>
Python Code:
%pylab notebook
VS = 230.0 # Secondary voltage (V)
amps = arange(0, 65.2, 6.52) # Current values (A)
Req = 0.0445 # Equivalent R (ohms)
Xeq = 0.0645 # Equivalent X (ohms)
I = amps * array([[0.8 - 0.6j], # Lagging
[1.0], # Unity
[0.8 + 0.6j]]) # Leading
VPa = VS + Req * I + 1j * Xeq * I
VR = (abs(VPa) - VS) / VS * 100;
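# Illustrative check of the formula above (not in the original): at I = 65.2 A, 0.8 PF lagging,
# VP/a = 230 + (0.0445 + 0.0645j) * 65.2*(0.8 - 0.6j) ≈ 234.84 + 1.62j V,
# so VR = (|VP/a| - VS) / VS * 100 ≈ 2.1 %.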
rc('text', usetex=True) # enable LaTeX commands for plot
plot(amps,VR[0,])
plot(amps,VR[1,])
plot(amps,VR[2,])
title(r'\textbf{Voltage Regulation Versus Load}');
xlabel(r'\textbf{Load (A)}');
ylabel(r'\textbf{Voltage Regulation (\%)}');
legend(('0.8 PF lagging','1.0 PF','0.8 PF leading'), loc=2);
grid()
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define all the parameters
Step2: Calculate the current values for the three power factors.
Step3: Calculate VP/a
Step4: Calculate voltage regulation
Step5: Plot the voltage regulation
12,503
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
'''step 1'''
# Load all necessary modules here, for clearness
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# from torchvision.datasets import MNIST
import torchvision
from torchvision import transforms
from torch.optim import lr_scheduler
# from tensorboardX import SummaryWriter
from collections import OrderedDict
from matplotlib import pyplot as plt
'''step 2'''
# Whether to put data on the GPU, according to whether a GPU is available or not
# cuda = torch.cuda.is_available()
# In case the default gpu does not have enough space, you can choose which device to use
# torch.cuda.set_device(device) # device: id
# Fall back to CPU computation if no GPU is available
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
'''step 3'''
# Note that torch and numpy keep separate random number generators, so both must be seeded for the same experiment.
SEED = 47
# Sets the seed for generating random numbers, including GPU and CPU
torch.manual_seed(SEED)
# Deterministic algorithm for convolutional ops
torch.backends.cudnn.deterministic = True
# Deterministic algorithm for cudnn; otherwise cudnn would choose the fastest algorithm for every
# iteration's ops, which causes variability (and is time-consuming if the input changes frequently).
torch.backends.cudnn.benchmark = False
# Seed the generator for Numpy
np.random.seed(SEED)
# Example to use kFold
from sklearn.model_selection import KFold
import numpy as np
train_transform = transforms.Compose([
])
dataset = torchvision.datasets.CIFAR10(root='./data',
train=True, transform=train_transform,download=True)
data = dataset.train_data
# dataset.train_labels gives a list object; we transform it to numpy for convenience
label = np.array(dataset.train_labels)
# set numpy random seed, we can get a determinate k-fold dataset
# np.random.seed(1)
kf = KFold(n_splits=5,shuffle=True)
for train_index, test_index in kf.split(data):
print('train_index', train_index, 'test_index', test_index)
train_data, train_label = data[train_index], label[train_index]
test_data, test_label = data[test_index], label[test_index]
# here we use the last fold to be our trainset
dataset.train_data = train_data
dataset.train_labels = list(train_label)
def get_mean_std(dataset, ratio=0.01):
    """Get per-channel mean and std by sampling `ratio` of the dataset."""
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=int(len(dataset)*ratio),
                                             shuffle=True, num_workers=10)
    train = next(iter(dataloader))[0]
    mean = np.mean(train.numpy(), axis=(0,2,3))
    std = np.std(train.numpy(), axis=(0,2,3))
    return mean, std
# cifar10
train_dataset = torchvision.datasets.CIFAR10('./data',
train=True, download=False,
transform=transforms.ToTensor())
test_dataset = torchvision.datasets.CIFAR10('./data',
train=False, download=False,
transform=transforms.ToTensor())
train_mean, train_std = get_mean_std(train_dataset)
test_mean, test_std = get_mean_std(test_dataset)
print(train_mean, train_std)
print(test_mean,test_std)
'''step 4'''
# set hyper parameter
batch_size = 32
n_epochs = 50
learning_rate = 1e-3
%matplotlib inline
from PIL import Image
# rotate 30°
transform_rotate = transforms.RandomRotation((30,30))
transform_horizontalflip = transforms.RandomHorizontalFlip(p=1)
transform_verticalflip = transforms.RandomVerticalFlip(p=1)
# the first image in cifar10 trainset
img = Image.open('./img/example.jpg')
plt.imshow(img)
plt.axis('off')
plt.show()
# a cat image
img1 = Image.open('./img/cat.jpeg')
plt.imshow(img1)
plt.axis('off')
plt.show()
# rotate the image
img2 = transform_rotate(img1)
plt.imshow(img2)
plt.axis('off')
plt.show()
# horizontal flip the image
img3 = transform_horizontalflip(img1)
plt.imshow(img3)
plt.axis('off')
plt.show()
# vertical flip the image
img4 = transform_verticalflip(img1)
plt.imshow(img4)
plt.axis('off')
plt.show()
'''step 5'''
'''
The mean and variance below come from the get_mean_std() function; every run of that function
may give slightly different values, because we use sampling.
'''
'''
Notice: we usually will not reuse the dataset above, because its transform function already converts
the numpy data to tensors, and transformations such as flip, rotation, crop and pad should be done
before the data is converted to a tensor.
'''
# transform1
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(10),
transforms.ToTensor(), # Convert a PIL Image or numpy.ndarray to tensor.
# Normalize the tensor image with the per-channel mean and std estimated above
transforms.Normalize((0.4931373, 0.48048604, 0.44251624), (0.24307655, 0.23882599, 0.25874203))
])
# transform2
# train_transform = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.48723358, 0.48176482, 0.45129994), (0.24020454, 0.23832673, 0.25824794))
])
train_dataset = torchvision.datasets.CIFAR10(root='./data',
train=True,
transform=train_transform,
download=True)
test_dataset = torchvision.datasets.CIFAR10(root='./data',
train=False,
transform=test_transform,
download=False)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True) # shuffle the training data each epoch
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
resnet18 = torchvision.models.resnet18()
print(resnet18)
'''step 6'''
class ResNet18(nn.Module):
def __init__(self):
super(ResNet18,self).__init__()
original_model = torchvision.models.resnet18()
original_model.conv1.stride = 1
self.feature_extractor = nn.Sequential(
*(list(original_model.children())[0:3]),
*(list(original_model.children())[4:-2]),
nn.AdaptiveAvgPool2d(1)
)
self.fc = nn.Linear(512,10)
def forward(self, x):
out1 = self.feature_extractor(x)
out1 = out1.view(out1.size(0),-1)
out2 = self.fc(out1)
return out2
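# Illustrative sanity check (not in the original notebook): the modified network
# should map a CIFAR10-sized batch (N, 3, 32, 32) to (N, 10) class logits.
print(ResNet18()(torch.randn(2, 3, 32, 32)).shape) # torch.Size([2, 10])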
'''step 7'''
# create a model object
# model = torchvision.models.resnet18()
# model.avgpool = nn.AdaptiveAvgPool2d(1)
# model.fc = nn.Linear(512,10)
model = ResNet18()
model.to(device)
# Cross entropy
loss_fn = torch.nn.CrossEntropyLoss()
# l2_norm can be done in SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
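# Hedged sketch for the per-layer learning-rate exercise in the description (names
# are illustrative): give the conv layers lr=0.1 and the fc layer lr=0.01 via param
# groups, then shrink both nine-fold every 10 epochs with StepLR.
# optimizer = torch.optim.SGD([
#     {'params': model.feature_extractor.parameters(), 'lr': 0.1},
#     {'params': model.fc.parameters(), 'lr': 0.01}], momentum=0.9)
# scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=1.0/9)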
'''step 8'''
def train(train_loader, model, loss_fn, optimizer, device):
    """Train the model using loss_fn and optimizer. Each call to this function
    trains the model for one epoch.
    Args:
        train_loader: train data
        model: prediction model
        loss_fn: loss function to judge the distance between target and outputs
        optimizer: optimize the loss function
    Returns:
        average_loss: average loss over the epoch
    """
    # set the module in training mode, affecting modules such as Dropout, BatchNorm, etc.
    model.train()
    total_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad() # clear gradients of all optimized torch.Tensors
        outputs = model(data) # make predictions
        loss = loss_fn(outputs, target) # compute loss
        total_loss += loss.item() # accumulate every batch loss in an epoch
        loss.backward() # compute gradient of loss over parameters
        optimizer.step() # update parameters with gradient descent
    average_loss = total_loss / len(train_loader) # average loss in this epoch
    return average_loss
'''step 9'''
def evaluate(loader, model, loss_fn, device):
    """Test the model's prediction performance on loader.
    When this function is called, the model is evaluated.
    Args:
        loader: data for evaluation
        model: prediction model
        loss_fn: loss function to judge the distance between target and outputs
    Returns:
        average_loss
        accuracy
    """
    # context manager that disables gradient computation
    with torch.no_grad():
        # set the module in evaluation mode
        model.eval()
        correct = 0.0 # count correctly classified data
        total_loss = 0 # accumulate loss
        for batch_idx, (data, target) in enumerate(loader):
            data, target = data.to(device), target.to(device)
            outputs = model(data) # make predictions
            # torch.max returns the maximum value of each row of the input tensor in the
            # given dimension dim; the second return value is the index location
            # of each maximum value found (argmax)
            _, predicted = torch.max(outputs, 1)
            # detach returns a new Tensor, detached from the current graph;
            # the result will never require gradient.
            correct += (predicted == target).cpu().sum().detach().numpy()
            loss = loss_fn(outputs, target) # compute loss
            total_loss += loss.item() # accumulate every batch loss in an epoch
        accuracy = correct*100.0 / len(loader.dataset) # accuracy over the whole dataset
        average_loss = total_loss / len(loader)
    return average_loss, accuracy
'''step 10'''
def fit(train_loader, val_loader, model, loss_fn, optimizer, n_epochs, device):
    """Train and validate the model: train() trains the model for one epoch and
    evaluate() measures its prediction performance.
    Args:
        train_loader: train data
        val_loader: validation data
        model: prediction model
        loss_fn: loss function to judge the distance between target and outputs
        optimizer: optimize the loss function
        n_epochs: training epochs
    Returns:
        train_accs: accuracy over n_epochs, a list
        train_losses: loss over n_epochs, a list
    """
    train_accs = [] # save train accuracy every epoch
    train_losses = [] # save train loss every epoch
    test_accs = []
    test_losses = []
    # scheduler = lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1)
    for epoch in range(n_epochs): # train for n_epochs
        # train model on the training dataset, optimize the loss function and update model parameters
        # you can also change the learning rate at any epoch you want, e.g.:
        # if epoch % 6 == 0 and epoch != 0:
        #     learning_rate = learning_rate * 0.1
        #     for param_group in optimizer.param_groups:
        #         param_group['lr'] = learning_rate
        train_loss = train(train_loader, model, loss_fn, optimizer, device=device)
        # evaluate model performance on the train dataset
        _, train_accuracy = evaluate(train_loader, model, loss_fn, device=device)
        # change the learning rate by scheduler
        # scheduler.step()
        message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}, Accuracy: {:.4f}'.format(
            epoch+1, n_epochs, train_loss, train_accuracy)
        print(message)
        # save loss, accuracy
        train_accs.append(train_accuracy)
        train_losses.append(train_loss)
        show_curve(train_accs, 'train_accs')
        show_curve(train_losses, 'train_losses')
        # evaluate model performance on the validation dataset
        val_loss, val_accuracy = evaluate(val_loader, model, loss_fn, device=device)
        test_accs.append(val_accuracy)
        test_losses.append(val_loss)
        show_curve(test_accs, 'test_accs')
        show_curve(test_losses, 'test_losses')
        message = 'Epoch: {}/{}. Validation set: Average loss: {:.4f}, Accuracy: {:.4f}'.format(
            epoch+1, n_epochs, val_loss, val_accuracy)
        print(message)
    return train_accs, train_losses
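# Hedged sketch for the "save the best model" exercise in the description (the helper
# name and checkpoint path are illustrative, not from the original): call it inside
# the epoch loop of fit() right after evaluating on the validation set.
def save_if_best(model, val_accuracy, best_acc, path='./params/best_model.pt'):
    # Checkpoint whenever validation accuracy improves; returns the running best.
    if val_accuracy > best_acc:
        torch.save(model.state_dict(), path)
        return val_accuracy
    return best_acc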
'''step 11'''
def show_curve(ys, title):
    """Plot the curve for Loss or Accuracy.
    !!YOU CAN READ THIS LATER, if you are interested
    Args:
        ys: loss or acc list
        title: Loss or Accuracy
    """
x = np.array(range(len(ys)))
y = np.array(ys)
plt.plot(x, y, c='b')
plt.axis()
plt.title('{} Curve:'.format(title))
plt.xlabel('Epoch')
plt.ylabel('{} Value'.format(title))
plt.show()
'''step 12'''
train_accs, train_losses = fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, device=device)
# to save the parameters of a model
torch.save(model.state_dict(), './params/resnet18_params.pt')
# to save the model
torch.save(model, './params/resnet18.pt')
model1 = torchvision.models.resnet18()
print(model1.state_dict())
# generate a parameters file of model1
torch.save(model1.state_dict(),'./params/resnet18_params.pt')
model2 = torchvision.models.resnet18()
model2.fc = nn.Linear(512,10)
'''
Parameters are saved as a dict in the .pt file, so we should get its keys first.
'''
print(model2.state_dict().keys())
pretrained_params = torch.load('./params/resnet18_params.pt')
model2_params = model2.state_dict()
for (pretrained_key,pretrained_val), (model2_key,model2_val) in zip(list(pretrained_params.items()),list(model2_params.items())):
model2_params[model2_key] = pretrained_val
if model2_key == 'layer4.1.bn2.num_batches_tracked':
break
# don't forget to load the parameters dict into your model!!!
model2.load_state_dict(model2_params)
print(model1.state_dict())
print(model2.state_dict())
n_epochs = 50
learning_rate = 0.005
model = ResNet18()
model.to(device)
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
train_accs, train_losses = fit(train_loader, test_loader, model, loss_fn, optimizer, n_epochs, device=device)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.1 Commonly required module
Step2: 1.2 Random seed setting for reproducibility
Step3: 2. Data split and Cross Validation
Step5: 2.2 Calculating Mean and Std for training dataset
Step6: 2.3 Hyper parameter
Step7: 2.4 Data augmentation
Step8: 3. classificatioon network
Step9: 3.2 Modified Resnet18 for out train dataset
Step10: 4. Training
Step13: 4.2 Initialize model parameters
Step16: Define function fit and use train_epoch and test_epoch
Step17: Suppose you design a model in which the convolution layers start with a learning rate of 0.1 while the linear layer starts at 0.01, and you want to make the learning rates nine times smaller every 10 epochs. Please write down your solution in pseudocode (one possible sketch appears after the optimizer definition in the code above).
Step18: You can measure the performance of your model on the test set in every epoch, so you can save the best model during the training period. Please add this functionality to your train() function (a hedged sketch appears after fit() in the code above).
Step19: Now we want to load model1's parameters into model2; however, they have different structures, so we can't simply use model2.load() or model2.load_state_dict(). We should load only part of model1's parameters into model2. For example, if we want to load the convolution layers' parameters, we can do it as below.
Step20: We can see that the last convolution-related key is 'layer4.1.bn2.num_batches_tracked'. Now, let's load it into our new model.
Step21: 6. Ensemble
12,504
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
!wget --no-check-certificate https://openaq-data.s3.amazonaws.com/2018-04-06.csv -P /Users/nipun/Downloads/
import pandas as pd
df = pd.read_csv("/Users/nipun/Downloads/2018-04-06.csv")
df = df[(df.country=='IN')&(df.parameter=='pm25')].dropna().groupby("location").mean()
df
!wget --no-check-certificate https://raw.githubusercontent.com/python-visualization/folium/master/examples/data/world-countries.json
import json
e = json.load(open('world-countries.json','r'))
json.dump(e['features'][73], open('india.json','w'))
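# A more robust alternative (illustrative, assuming the GeoJSON keeps the usual
# 'properties' -> 'name' field): look India up by name instead of hard-coding index 73.
# india = [f for f in e['features'] if f['properties']['name'] == 'India'][0]
# json.dump(india, open('india.json', 'w'))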
import folium
folium_map = folium.Map(width = '60%',height=800,location=[20, 77],
zoom_start=5,
tiles="Stamen Terrain",min_lat=7, max_lat=35, min_lon=73, max_lon=90)
for x in df.iterrows():
name = x[0]
lat, lon = x[1]['latitude'], x[1]['longitude']
folium.CircleMarker([lat, lon], radius=5, color='#000000',fill_color='#D3D3D3' , fill_opacity=1).add_to(folium_map)
folium.GeoJson('india.json').add_to(folium_map)
folium_map
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Downloading data from OpenAQ for 2018-04-06
Step2: Downloading World GeoJson file
Step3: Creating india.json corresponding to Indian data
12,505
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import sqlite3
import pandas as pd
import seaborn as sns
sns.set_style("white")
conn = sqlite3.connect('../data/output/database.sqlite')
c = conn.cursor()
def execute(sql):
'''
Executes a SQL command on the 'c' cursor and returns the results
'''
c.execute(sql)
return c.fetchall()
def printByYear(data):
'''
Given a list of tuples with (year, data), prints the data next to corresponding year
'''
for datum in data:
print "{0}: {1}".format(datum[0], datum[1])
years = {1996:1.46, 1997:1.43, 1998:1.4, 1999:1.38, 2000:1.33, 2001:1.3, 2002:1.28, 2003:1.25,
2004:1.22, 2005:1.18, 2006:1.14, 2007:1.11, 2008:1.07, 2009:1.07, 2010:1.05, 2011:1.02, 2012:1}
def adjustForInflation(value, year):
if value == None:
return
return value * years[year]
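# Illustrative example (not in the original): $10,000 reported in year-2000 dollars
# becomes 10000 * 1.33 = $13,300 in 2012 dollars.
print adjustForInflation(10000, 2000) # 13300.0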
# Median loan debt of those who graduate programs
debt = execute("""SELECT year, grad_debt_mdn
                  FROM Scorecard
                  WHERE year%2=0 AND grad_debt_mdn IS NOT NULL""")
def graphDistForYears(data, year1, year2):
data1 = pd.DataFrame()
data2 = pd.DataFrame()
# 1.4 is adjusting for inflation
data1['x'] = [1.4 * row[1] for row in data if row[0]==year1 and (isinstance(row[1], float) or isinstance(row[1], int))]
data2['x'] = [row[1] for row in data if row[0]==year2 and (isinstance(row[1], float) or isinstance(row[1], int))]
sns.distplot(data1, kde=False)
sns.distplot(data2, kde=False)
# Years with median student debt
# (use a new name here so we don't clobber the 'years' inflation dict used by adjustForInflation)
debt_years = set()
for row in debt:
    debt_years.add(row[0])
print debt_years
graphDistForYears(debt, 1998, 2012)
# The graph makes it look like there might be anomalies in the data such as negative debt, but
# this is just a quirk of the regression and not an actual fact
print len([row[1] for row in debt if isinstance(row[1], float) and row[1] < 0])
# Net tuition revenue per student for a institution
tuitionRev = execute("""SELECT year, tuitfte, instnm, ugds
                        FROM Scorecard
                        WHERE tuitfte IS NOT NULL and main='Main campus' and ugds>1000""")
graphDistForYears(tuitionRev, 1998, 2012)
top10 = [[0,0,0] for i in range(0, 10)]
for inst in [row for row in tuitionRev if row[0]==1998]:
i = 0
done = False
while i < len(top10) and not done:
if (top10[i][1] < inst[1]):
top10[i][0] = inst[2]
top10[i][1] = inst[1]
top10[i][2] = inst[3]
done = True
i += 1
for top in top10:
print "{0} -- {1} -- {2}".format(top[0], top[1], top[2])
expenses = execute("""SELECT year, inexpfte
                      FROM Scorecard
                      WHERE inexpfte IS NOT NULL and main='Main campus' and ugds>1000""")
graphDistForYears(expenses, 1998, 2012)
purdueData = execute("""SELECT year, tuitfte, inexpfte, grad_debt_mdn
                        FROM Scorecard
                        WHERE instnm='PURDUE UNIVERSITY-MAIN CAMPUS'""")
purdueData[:10]
def graphPurdueData(index):
df = pd.DataFrame()
df['Dollars'] = [adjustForInflation(row[index], row[0]) for row in purdueData]
df['Year'] = [row[0] for row in purdueData]
graph = sns.regplot('Year', 'Dollars',
data=df,
fit_reg=False)
graphPurdueData(1)
graphPurdueData(2)
graphPurdueData(3)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Current visualization ideas
Step6: Odd outliers throughout need to be explored and cleaned further
12,506
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
pd.__version__
import matplotlib.pyplot as plt
import matplotlib
matplotlib.__version__
pump_df = pd.read_csv('https://raw.githubusercontent.com/yy/dviz-course/master/data/pumps.csv')
pump_df.head()
# TODO: write your code here
# Your code here
len(pump_df)
pump_df.size
pump_df.shape # 13 rows and 2 columns
pump_df.columns
pump_df.describe()
pump_df[:2]
pump_df[-2:]
pump_df[1:5]
pump_df[pump_df.X > 13]
# TODO: Remove below dummy dataframe and write your code here. You probably want to create multiple cells.
death_df = pd.DataFrame({"X": [2., 3.], "Y": [1., 2.]})
death_df.plot()
%matplotlib inline
death_df.plot(x='X', y='Y', kind='scatter', label='Deaths')
death_df.plot(x='X', y='Y', kind='scatter', label='Deaths', s=2, c='black')
death_df.plot(x='X', y='Y', s=2, c='black', kind='scatter', label='Deaths')
pump_df.plot(x='X', y='Y', kind='scatter', c='red', s=8, label='Pumps')
ax = death_df.plot(x='X', y='Y', s=2, c='black', kind='scatter', label='Deaths')
ax
ax = death_df.plot(x='X', y='Y', s=2, c='black', alpha=0.5, kind='scatter', label='Deaths')
pump_df.plot(x='X', y='Y', kind='scatter', c='red', s=8, label='Pumps', ax=ax)
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# your code here
from scipy.spatial import Voronoi, voronoi_plot_2d
# you'll need this
points = pump_df.values
points
# TODO: your code here
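# A minimal sketch for the TODO above (one possible answer, assuming the standard
# scipy.spatial API): build the diagram from the pump locations and draw it.
# vor = Voronoi(points)
# voronoi_plot_2d(vor)
# plt.show()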
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,2,3])
plt.savefig('foo.png')
# TODO: your code here
%%html
<!DOCTYPE html>
<html>
<head>
<style>
.para {
font: 20px times-new-roman;
color: green;
padding: 10px;
border: 1px solid black;
}
</style>
</head>
<body>
<p class='para'>Hello World!</p>
<!--
You can also add an image in your html code
<img src='location'/>
-->
</body>
</html>
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can check the version of the library. Because pandas is a fast-evolving library, you want to make sure that you have an up-to-date version of the library.
Step2: You also need matplotlib, which is used by pandas to plot figures. The following is the most common convention to import matplotlib library.
Step3: Let's check its version too.
Step4: Loading a CSV data file
Step5: df stands for "Data Frame", which is a fundamental data object in Pandas. You can take a look at the dataset by looking at the first few lines.
Step6: Q1
Step7: You can also sample several rows randomly. If the data is sorted in some ways, sampling may give you a rather unbiased view of the dataset.
Step8: You can also figure out the number of rows in the dataset by running
Step9: Note that df.size does not give you the number of rows. It tells you the number of elements.
Step10: You can also look into the shape of the dataset as well as what are the columns in the dataset.
Step11: You can also check out basic descriptive statistics of the whole dataset by using the describe() method.
Step12: You can slice the data like a list
Step13: or filter rows using some conditions.
Step14: Now let's load another CSV file that documents the cholera deaths. The URL is https
Step15: Some visualizations?
Step16: This is not what we want! When asked to plot the data, it tries to figure out what we want based on the type of the data. However, that doesn't mean that it will successfully do so!
Step17: Commands that start with % are called magic commands, which are available in IPython and Jupyter. The purpose of this command is to tell IPython / Jupyter to show the plot right here instead of trying to use an external viewer.
Step18: I think I want to reduce the size of the dots and change the color to black. But it is difficult to find how to do that! It is sometimes quite annoying to figure out how to change how the visualization looks, especially when we use matplotlib. Unlike some other advanced tools, matplotlib does not provide a very coherent way to adjust your visualizations. That's one of the reasons why there are lots of visualization libraries that wrap matplotlib. Anyway, this is how you do it.
Step19: Can we visualize both deaths and pumps?
Step20: Oh well, this is not what we want! We want to overlay them to see them together, right? How can we do that? Before going into that, we probably want to understand some key components of matplotlib figures.
Step21: This object contains all the information and objects in the plot we see. Whatever we want to do with this axes (e.g., changing x or y scale, overlaying other data, changing the color or size of symbols, etc.) can be done by accessing this object.
Step22: Although simply invoking the plot() command is quick and easy when doing an exploratory data analysis, it is usually better to be formal about figure and axes objects.
Step23: Voronoi diagram
Step24: Take a look at the documentation of Voronoi and voronoi_plot_2d and
Step25: Saving the figure
Step26: Q4
Step27: Ok, that was a brief introduction to pandas and some simple visualizations. Now let's talk about web a little bit.
12,507
<ASSISTANT_TASK:>
Python Code:
##importing python module
import os
import pandas
import numpy
import gseapy
import mygene
import ipywidgets
import qgrid
import urllib2
qgrid.nbinstall(overwrite=True)
qgrid.set_defaults(remote_js=True, precision=4)
from IPython.display import IFrame
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
%matplotlib inline
##change directory
os.chdir("/Users/nicole/Documents/CCBB Internship")
##read in DESeq2 results
genes=pandas.read_csv("/Users/nicole/Documents/CCBB Internship/DE_genes.csv")
##View interactive table
#qgrid.show_grid(genes.sample(1000), grid_options={'forceFitColumns': False, 'defaultColumnWidth': 100})
#View top of file
genes.head(10)
#Extract genes that are differentially expressed with a pvalue less than a certain cutoff (pvalue < 0.05 or padj < 0.05)
genes_DE_only = genes.loc[(genes.padj < 0.05)]
#View top of file
genes_DE_only.head(10)
#Check how many rows in original genes file
len(genes)
#Check how many rows in DE genes file
len(genes_DE_only)
#Extract list of DE genes (Check to make sure this code works, this was adapted from a different notebook)
de_list = genes_DE_only[genes_DE_only.columns[0]]
#Remove .* from end of Ensembl ID
de_list2 = de_list.replace("\.\d","",regex=True)
#Add new column with reformatted Ensembl IDs
genes_DE_only["Full_Ensembl"] = de_list2
#View top of file
genes_DE_only.head(10)
#Set up mygene.info API and query
mg = mygene.MyGeneInfo()
gene_ids = mg.getgenes(de_list2, 'name, symbol, entrezgene', as_dataframe=True)
gene_ids.index.name = "Ensembl"
gene_ids.reset_index(inplace=True)
#View top of file
gene_ids.head(10)
#Merge mygene.info query results with original DE genes list
DE_with_ids = genes_DE_only.merge(gene_ids, left_on="Full_Ensembl", right_on="Ensembl")
#Check top of file
DE_with_ids.head(10)
#Write results to file
DE_with_ids.to_csv("./DE_genes_converted.csv")
#Convert to .txt file
DE_with_ids.to_csv("./DE_genes_converted.txt", sep='\t')
#Generate rank file (symbol, padj)
cols = DE_with_ids.columns.tolist()
cols.insert(0, cols.pop(cols.index('symbol')))
cols.insert(1, cols.pop(cols.index('log2FoldChange')))
prerank_file = DE_with_ids.reindex(columns= cols)
#Condense dataframe to contain only symbol & log2FoldChange
prerank_file.drop(prerank_file.columns[[2, 3, 4, 5, 6, 7, 8, 9, 10, 11,12,13]], axis=1, inplace=True)
#Exclude NaN values
prerank_file.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)
#Change column titles
prerank_file.columns = ['gene_name', 'rank']
prerank_file[['rank']] = prerank_file[['rank']].astype(float)
prerank_file.head(10)
#Display data types
prerank_file.dtypes
#Write results to file
prerank_file.to_csv("./prerank_file.csv", index=False)
#Remove header and separate by tab (.rnk format)
prerank_file.to_csv("./prerank_file.rnk", sep='\t', index=False, float_format='%.25f', header=False)
#Open .rnk file
pr = pandas.read_csv("/Users/nicole/Documents/CCBB Internship/prerank_file.rnk", sep='\t')
#gmt = pandas.read_csv("/Users/nicole/Documents/CCBB Internship/msigdb.v5.1.symbols.gmt-2.txt", sep = ' ')
#gmt.to_csv("/Users/nicole/Documents/CCBB Internship/msigdb.v5.1.symbols.gmt", sep = ' ')
#GSEApy prerank method to calculate es, nes, pval,fdrs, and produce figures
pr_results = gseapy.prerank(rnk = "/Users/nicole/Documents/CCBB Internship/prerank_file.rnk",
gene_sets = "/Users/nicole/Documents/CCBB Internship/msigdb.v5.1.symbols.gmt",
outdir='/Users/nicole/ccbb_internal/interns/Nicole/gseapy_output',
permutation_n=1000, graph_num = 539, format = 'png')
#Display GSEApy report
pr = pandas.read_csv("/Users/nicole/ccbb_internal/interns/Nicole/gseapy_output/gseapy_reports.csv")
#View interactive table
#qgrid.show_grid(pr, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 100})
pr.head(15)
#for loop that iterates through the top 15 enriched terms and displays the plots
gseapyres = pandas.read_csv("/Users/nicole/ccbb_internal/interns/Nicole/gseapy_output/gseapy_reports.csv")
gseapyres = gseapyres.head(15)
for i in gseapyres.ix[:,0]:
image = i
address = "/Users/nicole/ccbb_internal/interns/Nicole/gseapy_output/%s.png" % image
img = mpimg.imread(address)
plt.imshow(img)
plt.gcf().set_size_inches(10,10)
plt.show()
i = i.split(" ")[1].strip()
geneset = i
print "more info about " + geneset + " available here:"
print 'http://www.broadinstitute.org/gsea/msigdb/cards/%s' % geneset
print ' '
#IFrame('http://www.broadinstitute.org/gsea/msigdb/cards/%s' % geneset, width=900, height=900)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in differential expression results as a Pandas data frame to get differentially expressed gene list
Step2: Translate Ensembl IDs to Gene Symbols and Entrez IDs using mygene.info API
Step3: Implement GSEAPY
12,508
<ASSISTANT_TASK:>
Python Code:
from datetime import datetime
datetime(year=2015, month=7, day=4)
from dateutil import parser
date = parser.parse("4th of July, 2015")
date
date.strftime('%A')
import numpy as np
date = np.array('2015-07-04', dtype=np.datetime64)
date
date + np.arange(12)
np.datetime64('2015-07-04')
np.datetime64('2015-07-04 12:00')
np.datetime64('2015-07-04 12:59:59.50', 'ns')
import pandas as pd
date = pd.to_datetime("4th of July, 2015")
date
date.strftime('%A')
date + pd.to_timedelta(np.arange(12), 'D')
index = pd.DatetimeIndex(['2014-07-04', '2014-08-04',
'2015-07-04', '2015-08-04'])
data = pd.Series([0, 1, 2, 3], index=index)
data
data['2014-07-04':'2015-07-04']
data['2015']
dates = pd.to_datetime([datetime(2015, 7, 3), '4th of July, 2015',
'2015-Jul-6', '07-07-2015', '20150708'])
dates
dates.to_period('D')
dates - dates[0]
pd.date_range('2015-07-03', '2015-07-10')
pd.date_range('2015-07-03', periods=8)
pd.date_range('2015-07-03', periods=8, freq='H')
pd.period_range('2015-07', periods=8, freq='M')
pd.timedelta_range(0, periods=10, freq='H')
pd.timedelta_range(0, periods=9, freq="2H30T")
from pandas.tseries.offsets import BDay
pd.date_range('2015-07-01', periods=5, freq=BDay())
from pandas_datareader import data
goog = data.DataReader('GOOG', start='2004', end='2016',
data_source='google')
goog.head()
goog = goog['Close']
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn; seaborn.set()
goog.plot();
goog.plot(alpha=0.5, style='-')
goog.resample('BA').mean().plot(style=':')
goog.asfreq('BA').plot(style='--');
plt.legend(['input', 'resample', 'asfreq'],
loc='upper left');
fig, ax = plt.subplots(2, sharex=True)
data = goog.iloc[:10]
data.asfreq('D').plot(ax=ax[0], marker='o')
data.asfreq('D', method='bfill').plot(ax=ax[1], style='-o')
data.asfreq('D', method='ffill').plot(ax=ax[1], style='--o')
ax[1].legend(["back-fill", "forward-fill"]);
fig, ax = plt.subplots(3, sharey=True)
# apply a frequency to the data
goog = goog.asfreq('D', method='pad')
goog.plot(ax=ax[0])
goog.shift(900).plot(ax=ax[1])
goog.tshift(900).plot(ax=ax[2])
# legends and annotations
local_max = pd.to_datetime('2007-11-05')
offset = pd.Timedelta(900, 'D')
ax[0].legend(['input'], loc=2)
ax[0].get_xticklabels()[2].set(weight='heavy', color='red')
ax[0].axvline(local_max, alpha=0.3, color='red')
ax[1].legend(['shift(900)'], loc=2)
ax[1].get_xticklabels()[2].set(weight='heavy', color='red')
ax[1].axvline(local_max + offset, alpha=0.3, color='red')
ax[2].legend(['tshift(900)'], loc=2)
ax[2].get_xticklabels()[1].set(weight='heavy', color='red')
ax[2].axvline(local_max + offset, alpha=0.3, color='red');
ROI = 100 * (goog.tshift(-365) / goog - 1)
ROI.plot()
plt.ylabel('% Return on Investment');
rolling = goog.rolling(365, center=True)
data = pd.DataFrame({'input': goog,
'one-year rolling_mean': rolling.mean(),
'one-year rolling_std': rolling.std()})
ax = data.plot(style=['-', '--', ':'])
ax.lines[0].set_alpha(0.3)
# !curl -o FremontBridge.csv https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD
data = pd.read_csv('FremontBridge.csv', index_col='Date', parse_dates=True)
data.head()
data.columns = ['West', 'East']
data['Total'] = data.eval('West + East')
data.dropna().describe()
%matplotlib inline
import seaborn; seaborn.set()
data.plot()
plt.ylabel('Hourly Bicycle Count');
weekly = data.resample('W').sum()
weekly.plot(style=[':', '--', '-'])
plt.ylabel('Weekly bicycle count');
daily = data.resample('D').sum()
daily.rolling(30, center=True).sum().plot(style=[':', '--', '-'])
plt.ylabel('mean hourly count');
daily.rolling(50, center=True,
win_type='gaussian').sum(std=10).plot(style=[':', '--', '-']);
by_time = data.groupby(data.index.time).mean()
hourly_ticks = 4 * 60 * 60 * np.arange(6)
by_time.plot(xticks=hourly_ticks, style=[':', '--', '-']);
by_weekday = data.groupby(data.index.dayofweek).mean()
by_weekday.index = ['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun']
by_weekday.plot(style=[':', '--', '-']);
weekend = np.where(data.index.weekday < 5, 'Weekday', 'Weekend')
by_time = data.groupby([weekend, data.index.time]).mean()
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2, figsize=(14, 5))
by_time.ix['Weekday'].plot(ax=ax[0], title='Weekdays',
xticks=hourly_ticks, style=[':', '--', '-'])
by_time.ix['Weekend'].plot(ax=ax[1], title='Weekends',
xticks=hourly_ticks, style=[':', '--', '-']);
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Or, using the dateutil module, you can parse dates from a variety of string formats
Step2: Once you have a datetime object, you can do things like printing the day of the week
Step3: In the final line, we've used one of the standard string format codes for printing dates ("%A"), which you can read about in the strftime section of Python's datetime documentation.
Step4: Once we have this date formatted, however, we can quickly do vectorized operations on it
Step5: Because of the uniform type in NumPy datetime64 arrays, this type of operation can be accomplished much more quickly than if we were working directly with Python's datetime objects, especially as arrays get large
Step6: Here is a minute-based datetime
Step7: Notice that the time zone is automatically set to the local time on the computer executing the code.
Step8: The following table, drawn from the NumPy datetime64 documentation, lists the available format codes along with the relative and absolute timespans that they can encode
Step9: Additionally, we can do NumPy-style vectorized operations directly on this same object
Step10: In the next section, we will take a closer look at manipulating time series data with the tools provided by Pandas.
Step11: Now that we have this data in a Series, we can make use of any of the Series indexing patterns we discussed in previous sections, passing values that can be coerced into dates
Step12: There are additional special date-only indexing operations, such as passing a year to obtain a slice of all data from that year
Step13: Later, we will see additional examples of the convenience of dates-as-indices.
Step14: Any DatetimeIndex can be converted to a PeriodIndex with the to_period() function with the addition of a frequency code; here we'll use 'D' to indicate daily frequency
Step15: A TimedeltaIndex is created, for example, when a date is subtracted from another
Step16: Regular sequences
Step17: Alternatively, the date range can be specified not with a start and endpoint, but with a startpoint and a number of periods
Step18: The spacing can be modified by altering the freq argument, which defaults to D.
Step19: To create regular sequences of Period or Timedelta values, the very similar pd.period_range() and pd.timedelta_range() functions are useful.
Step20: And a sequence of durations increasing by an hour
Step21: All of these require an understanding of Pandas frequency codes, which we'll summarize in the next section.
Step22: All of these short codes refer to specific instances of Pandas time series offsets, which can be found in the pd.tseries.offsets module.
Step23: For more discussion of the use of frequencies and offsets, see the "DateOffset" section of the Pandas documentation.
Step24: For simplicity, we'll use just the closing price
Step25: We can visualize this using the plot() method, after the normal Matplotlib setup boilerplate (see Chapter 4)
Step26: Resampling and converting frequencies
Step27: Notice the difference
Step28: The top panel is the default
Step29: We see here that shift(900) shifts the data by 900 days, pushing some of it off the end of the graph (and leaving NA values at the other end), while tshift(900) shifts the index values by 900 days.
Step30: This helps us to see the overall trend in Google stock
Step31: As with group-by operations, the aggregate() and apply() methods can be used for custom rolling computations.
Step32: Once this dataset is downloaded, we can use Pandas to read the CSV output into a DataFrame.
Step33: For convenience, we'll further process this dataset by shortening the column names and adding a "Total" column
Step34: Now let's take a look at the summary statistics for this data
Step35: Visualizing the data
Step36: The ~25,000 hourly samples are far too dense for us to make much sense of.
Step37: This shows us some interesting seasonal trends
Step38: The jaggedness of the result is due to the hard cutoff of the window.
Step39: Digging into the data
Step40: The hourly traffic is a strongly bimodal distribution, with peaks around 8
Step41: This shows a strong distinction between weekday and weekend totals, with around twice as many average riders crossing the bridge on Monday through Friday than on Saturday and Sunday.
Step42: Now we'll use some of the Matplotlib tools described in Multiple Subplots to plot two panels side by side
12,509
<ASSISTANT_TASK:>
Python Code:
import epoxpy
from epoxpy.lib import A
a = A()
a.visualize(show_ports=True)
from epoxpy.lib import C10
c10 = C10()
c10.visualize(show_ports=True)
from epoxpy.lib import Epoxy_A_10_B_20_C10_2_Blend
import mbuild as mb
import random
random.seed(1024)
blend = Epoxy_A_10_B_20_C10_2_Blend()
#blend.visualize()
mix_box = mb.packing.fill_box(blend,1,box=[3,3,3])
print(mix_box.n_bonds)
print(mix_box.n_particles)
mix_box.visualize(show_ports=False)
from foyer import Forcefield
mix_box.save('blend.gsd', write_ff=False)
mix_box.save('blend.hoomdxml')
%matplotlib inline
import matplotlib.pyplot as plt
import epoxpy.epoxy_simulation as es
import epoxpy.job as jb
import epoxpy.temperature_profile_builder as tpb
print('\n# Test1: Running the simulation in a single job')
# This simulation should run a total of 700 time steps because the default dt of the HOOMD engine is 1e-2
mix_time = 3e4
md_time = 4e4
mix_kt = 2.0
time_scale = 1e4
temp_scale = 1
type_A_md_temp_profile = tpb.LinearTemperatureProfileBuilder(initial_temperature=mix_kt, initial_time=mix_time)
type_A_md_temp_profile.add_state_point(60 * time_scale, 4.5 * temp_scale)
type_A_md_temp_profile.add_state_point(190 * time_scale, 4.5 * temp_scale)
type_A_md_temp_profile.add_state_point(240 * time_scale, 1.0 * temp_scale)
fig = type_A_md_temp_profile.get_figure()
plt.show()
fig.savefig("type_A_temp_profile.png")
myEpoxySim = es.EpoxySimulation('epoxy_test_mbuild', mix_time=mix_time, mix_kt=mix_kt,
temp_prof=type_A_md_temp_profile, n_mul=1.0, bond=True, bond_period=1*time_scale)
mySingleJobForEpoxy = jb.SingleJob(myEpoxySim)
mySingleJobForEpoxy.execute()
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Particle C10
Step2: Epoxy blend (10 A's, 20 B's and 2 C10's)
12,510
<ASSISTANT_TASK:>
Python Code:
import rebound
import numpy as np
sim = rebound.Simulation()
OMEGA = 0.00013143527 # [1/s]
sim.ri_sei.OMEGA = OMEGA
surface_density = 400. # kg/m^2
particle_density = 400. # kg/m^3
sim.G = 6.67428e-11 # N m^2 / kg^2
sim.dt = 1e-3*2.*np.pi/OMEGA
sim.softening = 0.2 # [m]
boxsize = 200. # [m]
sim.configure_box(boxsize)
sim.configure_ghostboxes(2,2,0)
sim.integrator = "sei"
sim.boundary = "shear"
sim.gravity = "tree"
sim.collision = "tree"
def cor_bridges(r, v):
eps = 0.32*pow(abs(v)*100.,-0.234)
if eps>1.:
eps=1.
if eps<0.:
eps=0.
return eps
sim.coefficient_of_restitution = cor_bridges
def powerlaw(slope, min_v, max_v):
y = np.random.uniform()
pow_max = pow(max_v, slope+1.)
pow_min = pow(min_v, slope+1.)
return pow((pow_max-pow_min)*y + pow_min, 1./(slope+1.))
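# Quick illustrative check (not in the original): radii drawn from the r^-3 size
# distribution should all fall between min_v and max_v, with small radii favoured.
print([round(powerlaw(slope=-3, min_v=1, max_v=4), 2) for _ in range(5)])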
total_mass = 0.
while total_mass < surface_density*(boxsize**2):
radius = powerlaw(slope=-3, min_v=1, max_v=4) # [m]
mass = particle_density*4./3.*np.pi*(radius**3)
x = np.random.uniform(low=-boxsize/2., high=boxsize/2.)
sim.add(
m=mass,
r=radius,
x=x,
y=np.random.uniform(low=-boxsize/2., high=boxsize/2.),
z=np.random.normal(),
vx = 0.,
vy = -3./2.*x*OMEGA,
vz = 0.)
total_mass += mass
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def plotParticles(sim):
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(111,aspect='equal')
ax.set_ylabel("radial coordinate [m]")
ax.set_xlabel("azimuthal coordinate [m]")
ax.set_ylim(-boxsize/2.,boxsize/2.)
ax.set_xlim(-boxsize/2.,boxsize/2.)
for i, p in enumerate(sim.particles):
circ = patches.Circle((p.y, p.x), p.r, facecolor='darkgray', edgecolor='black')
ax.add_patch(circ)
plotParticles(sim)
sim.integrate(2.*np.pi/OMEGA)
plotParticles(sim)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next up, setting up several constants. We will be simulating a shearing sheet, a box with shear-periodic boundary conditions. This is a local approximation in which the epicyclic frequency $\Omega$ is taken to be the same for all particles.
Step2: Next, we need to let REBOUND know about $\Omega$. Within REBOUND $\Omega$ is used by the integrator SEI, the Symplectic Epicycle Integrator (see Rein and Tremaine 2012).
Step3: Finally, let us define the surface density of the ring and the particle density.
Step4: The gravitational constant in SI units is
Step5: We choose a timestep of 1/1000th of the orbital period.
Step6: We enable gravitational softening to smear out any potential numerical artefacts at very small scales.
Step7: Next up, we configure the simulation box. By default REBOUND used no boundary conditions, but here we have shear periodic boundaries and a finite simulation domain, so we need to let REBOUND know about the simulation boxsize (note that it is significantly smaller than $a$, so our local approximation is very good. In this example we'll work in SI units.
Step8: Because we have shear-periodic boundary conditions, we use ghost boxes to simulate the gravity of neighbouring ring patches. The more ghostboxes we use, the smoother the gravitational force accross the boundary. Here, two layers of ghost boxes in the x and y direction are enough (this is a total of 24 ghost boxes). We don't need ghost boxes in the z direction because a rings is a two dimensional system.
Step9: We can now setup which REBOUND modules we want to use for our simulation. Besides the SEI integrator and the shear-periodic boundary conditions mentioned above, we select the tree modules for both gravity and collisions. This speeds up the code from $O(N^2)$ to $O(N \log(N))$ for large numbers of particles $N$.
Step10: When two ring particles collide, they loose energy during their the bounce. We here use a velocity dependent Bridges et. al. coefficient of restitution. It is implemented as a python function (a C implementation would be faster!). We let REBOUND know which function we want to use by setting the coefficient_of_restitution function pointer in the simulation instance.
Step11: To initialize the particles, we will draw random numbers from a power law distribution.
Step12: Now we can finally add particles to REBOUND. Note that we initialize particles so that they have initially no velovity relative to the mean shear flow.
Step13: To see what is going on in our simulation, we create a function to plot the current positions of particles and call it once to visualise the initial conditions.
Step14: We now integrate for one orbital period $P=2\pi/\Omega$.
Step15: The integration takes a few seconds, then we can visualise the final particle positions.
12,511
<ASSISTANT_TASK:>
Python Code:
from datetime import datetime,timedelta, time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from data_helper_functions import *
from IPython.display import display
pd.options.display.max_columns = 999
%matplotlib inline
desired_channel = 'BAND_01'
desired_date = datetime(2014, 4, 1)
desired_timedelta = timedelta(hours = 15)
desired_datetime = desired_date + desired_timedelta
satellite_filefolder = '../../data/satellite/colorado/summer6months/data/'
sensor_filefolder = '../../data/sensor_data/colorado6months/'
pvoutput_filefolder = '../../data/pvoutput/pvoutput6months/'
#satellite data
satellite_filename = find_filename(desired_datetime, desired_channel, satellite_filefolder)
lons, lats, data = return_satellite_data(satellite_filename, satellite_filefolder)
plt.figure(figsize=(8, 8))
imgplot = plt.imshow(data)
imgplot.set_interpolation('none')
plt.savefig('foo.png')
plt.show()
#sensor data
sensor_filename = find_file_from_date(desired_date, sensor_filefolder)
df_sensor = return_sensor_data(sensor_filename, sensor_filefolder)
df_sensor[df_sensor.index == desired_datetime]
display(df_sensor[df_sensor.index == desired_datetime])
#pvoutput data
pvoutput_filename = find_file_from_date(desired_date, pvoutput_filefolder)
df_pvoutput = return_pvoutput_data(pvoutput_filename, pvoutput_filefolder)
display(df_pvoutput[df_pvoutput.index == desired_datetime])
#saving df to image
# a = Image(data=df_sensor)
# type(a)
from datetime import datetime,timedelta, time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from data_helper_functions import *
from IPython.display import display
pd.options.display.max_columns = 999
%matplotlib inline
#iterate over datetimes:
mytime = datetime(2014, 4, 1, 13)
times = make_time(mytime)
# Now that we can call data up over any datetime and we have a list of interested datetimes,
# we can finally construct an X matrix and y vector for regression.
sensor_filefolder = 'data/sensor_data/colorado6months/'
pvoutput_filefolder = 'data/pvoutput/pvoutput6months/'
X = [] #Sensor values
y = [] #PVOutput
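# The bare try/except in the loop below silently skips any datetime whose
# files are missing or malformed; log the failing datetimes when debugging.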
for desired_datetime in times:
try: #something wrong with y on last day
desired_date = (desired_datetime - timedelta(hours=6)).date() #make sure correct date
desired_date = datetime.combine(desired_date, time.min) #get into datetime format
sensor_filename = find_file_from_date(desired_date, sensor_filefolder)
        df_sensor = return_sensor_data(sensor_filename, sensor_filefolder).iloc[:, -15:-1]
df_sensor[df_sensor.index == desired_datetime]
pvoutput_filename = find_file_from_date(desired_date, pvoutput_filefolder)
df_pvoutput = return_pvoutput_data(pvoutput_filename, pvoutput_filefolder)
y.append(df_pvoutput[df_pvoutput.index == desired_datetime].values[0][0])
X.append(df_sensor[df_sensor.index == desired_datetime].values[0])
except:
pass
X = np.array(X)
y = np.array(y)
print(X.shape)
print(y.shape)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=99)
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(oob_score = True)
rfr.fit(X_train,y_train)
y_pred = rfr.predict(X_test)
rfr.score(X_test,y_test)
df_sensor.columns.values.shape
sorted_mask = np.argsort(rfr.feature_importances_)
for i in list(zip(df_sensor.columns.values[sorted_mask], rfr.feature_importances_[sorted_mask]))[::-1]:
    print(i)
#now do a linear model and compare:
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train,y_train)
lr.score(X_test,y_test)
sorted_mask = np.argsort(lr.coef_)
for i in list(zip(df_sensor.columns.values[sorted_mask], lr.coef_[sorted_mask]))[::-1]:
    print(i)
df_sensor.iloc[:, -15:-1].head() #selects photometer and AOD,
# useful in next iteration of using sensor data to fit
import pandas as pd
import numpy as np
from sklearn.preprocessing import scale
from lasagne import layers
from lasagne.nonlinearities import softmax, rectify, sigmoid, linear, very_leaky_rectify, tanh
from lasagne.updates import nesterov_momentum, adagrad, momentum
from nolearn.lasagne import NeuralNet
import theano
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
y = y.astype('float32')
x = X.astype('float32')
scaler = StandardScaler()
scaled_x = scaler.fit_transform(x)
x_train, x_test, y_train, y_test = train_test_split(scaled_x, y, test_size = 0.2, random_state = 12)
nn_regression = NeuralNet(layers=[('input', layers.InputLayer),
# ('hidden1', layers.DenseLayer),
# ('hidden2', layers.DenseLayer),
('output', layers.DenseLayer)
],
# Input Layer
input_shape=(None, x.shape[1]),
# hidden Layer
# hidden1_num_units=512,
# hidden1_nonlinearity=softmax,
# hidden Layer
# hidden2_num_units=128,
# hidden2_nonlinearity=linear,
# Output Layer
output_num_units=1,
output_nonlinearity=very_leaky_rectify,
# Optimization
update=nesterov_momentum,
update_learning_rate=0.03,#0.02
update_momentum=0.8,#0.8
max_epochs=600, #was 100
# Others
#eval_size=0.2,
regression=True,
verbose=0,
)
nn_regression.fit(x_train, y_train)
y_pred = nn_regression.predict(x_test)
nn_regression.score(x_test, y_test)
val = 11
print(y_pred[val][0])
print(y_test[val])
plt.plot(y_pred,'ro')
plt.plot(y_test,'go')
from sklearn.ensemble import ExtraTreesRegressor
etr = ExtraTreesRegressor(oob_score=True, bootstrap=True,
n_jobs=-1, n_estimators=1000) #nj_obs uses all cores!
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=99)
etr.fit(X_train, y_train)
print(etr.score(X_test, y_test))
print(etr.oob_score_)
y_pred = etr.predict(X_test)
from random import randint
val = randint(0,y_test.shape[0])
print(y_pred[val])
print(y_test[val])
print(X.shape)
print(y.shape)
from sklearn.externals import joblib
joblib.dump(etr, 'data/sensor-to-power-model/sensor-to-power-model.pkl')
np.savez_compressed('data/y.npz',y=y) #save y
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build up sensor to pvoutput model
Step2: ...finally ready to model!
Step3: Linear model
Step4: When only keeping the photometer data, the random forest and the linear model perform similarly. When I added all of the sensor instruments to the fit, rfr scored 0.87 and lr scored negative!
Step5: Extra Trees!
Step6: Save this thing and try it out on the simulated sensors!
|
12,512
|
<ASSISTANT_TASK:>
Python Code:
def do_something(arg1, arg2):
    """A short sentence describing what this function does.

    More description

    Parameters
    ----------
    arg1 : type1
        Description of the parameter ``arg1``
    arg2 : type2
        Description of the parameter ``arg2``

    Returns
    -------
    type of return value (e.g. int, float, string, etc.)
        A description of the thing the function returns (if anything)
    """
# complete
# complete
do_something.__doc__
help(do_something)
do_something?
# this one does more than just show the docstring, but is useful to know nonetheless
do_something??
from your_neighbors_package import your_neighbors_code # complete
your_neighbors_code? # complete
... = your_neighbors_code(...)
... # complete
class MyClass: # if you're using Py2, you'll want to do "MyClass(object)"
    """A short description of the class.

    Possibly some extended description, notes on how to sub-class, etc.

    Parameters
    ----------
    arg1 : type
        Describe the first argument of the initializer
    arg2 : type
        Describe the second argument of the initializer
    """
    def __init__(self, arg1, arg2):
# note that the initializer gets *no* docstring, because it's in the class docs
#complete
def some_method(self, method_arg):
        """A short description of the method.

        Possibly extended description.

        Parameters
        ----------
        method_arg : type
            A description of the method's first (non-self) argument.

        Returns
        -------
        return type
            Description of the return value (if any)
        """
#complete
import <mypackage> #complete
<mypackage>? #complete
from <mypackage> import <mymodule> #complete
<mymodule>? #complete
!conda install sphinx
%cd <yourpackage> #complete
!mkdir docs
%cd docs
!ls
# should be empty...
!sphinx-quickstart -a "<yourname>" -p <yourpackagename> -v <version> --ext-autodoc -q #complete
!ls
!make html
%cd .. #or whatever you need to do to get back to the base of your repository
%%file .gitignore
docs/_build/*
build
dist
!git add .gitignore docs
!git commit -m #complete
%%file second_doc.rst
A Document title goes here
--------------------------
More information. Here's a link back to the index page: :doc:`index`.
%%file api_docs.rst
API Documentation
=================
This package has two modules, detailed below.
Also, after doing this, you can mention some of the functions from *anywhere* in the docs by doing :func:`<yourpackage>.<module>.<function_you_want_documented>`.
<yourpackagename>
-----------------
.. automodule:: <yourpackage>
<your modulename>
-----------------
.. automodule:: <yourpackage>.<module>
.. autofunction:: <yourpackage>.<module>.<function_you_want_documented>
.. autofunction:: <yourpackage>.<module>.<function_you_want_documented2>
%%file <yourpackage>/class_heirarchy.py #complete
class A(): # this needs to be "A(object)" in py 2.x
pass
class B(A):
pass
class C(A):
pass
class D(B,C):
pass
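%%file inheritance.rst
Class Hierarchy
---------------

.. hedged sketch, not in the original: this assumes
   'sphinx.ext.inheritance_diagram' has been added to the extensions
   list in docs/conf.py

.. inheritance-diagram:: <yourpackage>.class_heirarchy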
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Documenting Code
Step2: 1b
Step3: 1c
Step6: 1d
Step7: 1e
Step8: Problem 2
Step9: 2b
Step10: 2c
Step11: You should see various files have appeared in your docs directory, most critically a conf.py and index.rst
Step12: 2f
Step13: Problem 3
Step14: 4b
Step15: You may not need multiple automodule calls, depending on how you structured things. Also you might be able to avoid the autofunction directives depending on how you laid things out, by adding
|
12,513
|
<ASSISTANT_TASK:>
Python Code:
import iris
import numpy as np
import holoviews as hv
import holocube as hc
from cartopy import crs
from cartopy import feature as cf
hv.notebook_extension()
%%output size=400
feats = [cf.LAND, cf.OCEAN, cf.RIVERS, cf.LAKES, cf.BORDERS, cf.COASTLINE]
features = hv.Overlay([hc.GeoFeature(feature) for feature in feats])
features
projections = [crs.RotatedPole, crs.TransverseMercator, crs.Mercator, crs.LambertCylindrical,
crs.Geostationary, crs.AzimuthalEquidistant, crs.OSGB, crs.EuroPP, crs.Gnomonic,
crs.PlateCarree, crs.Mollweide, crs.OSNI, crs.Miller, crs.InterruptedGoodeHomolosine,
crs.LambertConformal, crs.SouthPolarStereo, crs.AlbersEqualArea, crs.Orthographic,
crs.NorthPolarStereo, crs.Robinson, crs.Stereographic]
hv.Layout([hc.GeoFeature(cf.COASTLINE, group=p.__name__)(plot=dict(projection=p()))
for p in projections]).display('all')
%output size=250
(features(plot=dict(projection=crs.Mollweide())) +
features.relabel(group='Geostationary Overlay')(plot=dict(projection=crs.Geostationary())))
%%output backend='matplotlib:nbagg' widgets='live' size=200
url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
layer = 'VIIRS_CityLights_2012'
hc.WMTS(url, layer=layer)(plot=dict(projection=crs.PlateCarree()))
%%output size=200
%%opts Overlay [apply_extents=True]
from cartopy.io.img_tiles import MapQuestOSM
from matplotlib.path import Path
def tube_locations():
    """Returns an (n, 2) array of selected London Tube locations in Ordnance
    Survey GB coordinates.

    Source: http://www.doogal.co.uk/london_stations.php
    """
return np.array([[531738., 180890.], [532379., 179734.],
[531096., 181642.], [530234., 180492.],
[531688., 181150.], [530242., 180982.],
[531940., 179144.], [530406., 180380.],
[529012., 180283.], [530553., 181488.],
[531165., 179489.], [529987., 180812.],
[532347., 180962.], [529102., 181227.],
[529612., 180625.], [531566., 180025.],
[529629., 179503.], [532105., 181261.],
[530995., 180810.], [529774., 181354.],
[528941., 179131.], [531050., 179933.],
[530240., 179718.]])
theta = np.linspace(0, 2 * np.pi, 100)
circle_verts = np.vstack([np.sin(theta), np.cos(theta)]).T
concentric_circle = Path.make_compound_path(Path(circle_verts[::-1]),
Path(circle_verts * 0.6))
rectangle = Path([[-1.1, -0.2], [1, -0.2], [1, 0.3], [-1.1, 0.3]])
tiles = MapQuestOSM()
hc.GeoTiles(tiles)(plot=dict(projection=tiles.crs, zoom=14)) *\
hc.Points(tube_locations(), crs=crs.OSGB())(style=dict(color='r', s=100, marker=concentric_circle)) *\
hc.Points(tube_locations(), crs=crs.OSGB())(style=dict(color='b', s=100, marker=rectangle))
import numpy as np
import iris
import iris.coords
def realization_metadata(cube, field, fname):
if not cube.coords('realization'):
realization_number = fname[-6:-3]
realization_coord = iris.coords.AuxCoord(np.int32(realization_number), 'realization')
cube.add_aux_coord(realization_coord)
surface_temp = iris.load_cube(iris.sample_data_path('GloSea4', 'ensemble_???.pp'),
iris.Constraint('surface_temperature', realization=lambda value: True),
callback=realization_metadata)
cube = hc.HoloCube(surface_temp)
cube
kdims = ['time', 'realization']
img_hmap = hv.HoloMap(kdims=kdims)
for cb in surface_temp.slices(['longitude', 'latitude']):
key = tuple(cb.coord(kd).points[0] for kd in kdims)
img_hmap[key] = hc.Image(cb)
img_hmap.info
%opts Image [colorbar=True projection=crs.Geostationary()] (cmap='viridis')
img_hmap.last * hc.GeoFeature(cf.COASTLINE)
%output widgets='live' size=300
%%opts GeoImage [colorbar=True] (cmap='viridis')
(cube.groupby(['time', 'realization'], group_type=hc.Image) * hc.GeoFeature(cf.COASTLINE))
%%opts Contours [colorbar=True] (cmap='viridis')
(cube.groupby(['time', 'realization'], group_type=hc.Contours) * hc.GeoFeature(cf.COASTLINE))
%%opts Points [color_index=2 size_index=None]
cube.to.points(['longitude', 'latitude'])
%%opts Curve [aspect=2 xticks=4 ] (linestyle='--') NdOverlay [aspect=2 legend_position='right']
if cube.data.coord('latitude').bounds is None:
cube.data.coord('latitude').guess_bounds()
if cube.data.coord('longitude').bounds is None:
cube.data.coord('longitude').guess_bounds()
grid_weights = iris.analysis.cartography.area_weights(cube.data)
collapsed_cube = cube.data.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_weights)
hc.HoloCube(collapsed_cube).to.curve(['time']).overlay()
%%opts Curve [aspect=3 xticks=10]
collapsed_cube = cube.data.collapsed('forecast_period', iris.analysis.MEAN, weights=grid_weights)
hc.HoloCube(collapsed_cube).to.curve(['latitude'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting with projections
Step2: Below is the full list of cartopy projections that can be displayed using matplotlib.
Step3: We can test the different projections by creating a Layout of GeoFeature elements, each with a different projection
Step4: To change the projection we can use the call method on HoloViews objects and set it as a plot option, this way we can easily compose plots with different projections. Here we compose a StockImage using the Mollweide projection with an Overlay of a StockImage and Coastlines Element set to a GeoStationary projection.
Step5: This way we can also use different Element types such as this WMTS Element, which allows wrapping any webmap tilesource
Step7: In some cases the data does not define the coordinate system the data is in automatically, e.g. when using points. In this case we can supply a coordinate reference system directly. Here we display a GeoTiles object drawn from the MapQuest ordinance survey map of Great Britain, and overlay this tile source with points corresponding to the tube map locations. Since these coordinates are in Ordinance Survery GB coordinates we declare that explicitly via the crs parameter on the GeoPoints.
Step8: Loading and displaying data
Step9: The Cube Element defined above wraps the Iris Cubes, converting coordinates to HoloViews dimensions and trying to infer the correct order of dimensions
Step10: We'll come back to the Cube Element later; for now we will slice this cube up manually. By taking slices along latitude and longitude we can slice the data up into 2D chunks and wrap them in GeoImage, a subclass of Cube which can be visualized. We place these objects into a HoloMap with the remaining dimensions time and realization as key dimensions.
Step11: The HoloMap can summarize the contained data
Step12: A convenient way of accessing a single Element in a HoloMap is the .last attribute. Now that we have a handle on it we can customize it in a number of ways using the call method as above or using the options magic
Step13: Groupby and conversions
Step14: Slicing a Cube up in the way we saw before is often very useful but it's also a little bit of effort. To make this easier HoloViews interfaces usually implement a groupby method. Here we show how to achieve the same thing as above but using groupby instead. We may add another clearer interface eventually but groupby will provide the low level API for any such conversion interface.
Step15: As you can see it has automatically converted the cube to an widget allowing you to explore this space. We can repeat the same groupby operation this time with a Contours Element as the group_type.
Step16: Working with non-geographic Element types
Step17: We can also collapse specific dimensions on the iris Cube first and then view the reduced Cube using regular HoloViews Element types. Here we collapse the longitude and latitude dimensions on the iris Cube by taking the weighted mean, wrap it in a HoloCube and then view the mean surface temperature for each realization and overlay the curves.
Step18: Similarly we can collapse the forecast period, leaving just latitude and longitude coordinates and then view slices along each longitude
|
12,514
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
import pandexo.engine.justdoit as jdi
import numpy as np
import os
exo_dict = jdi.load_exo_dict('HD 189733 b')
exo_dict['observation']['sat_level'] = 80 #saturation level in percent of full well
exo_dict['observation']['sat_unit'] = '%'
exo_dict['observation']['noccultations'] = 2 #number of transits
exo_dict['observation']['R'] = None #fixed binning. I usually suggest ZERO binning.. you can always bin later
#without having to redo the calcualtion
exo_dict['observation']['baseline_unit'] = 'total' #Defines how you specify out of transit observing time
#'frac' : fraction of time in transit versus out = in/out
#'total' : total observing time (seconds)
exo_dict['observation']['baseline'] = 4.0*60.0*60.0 #in accordance with what was specified above (total observing time)
exo_dict['observation']['noise_floor'] = 0 #this can be a fixed level or it can be a filepath
#to a wavelength dependent noise floor solution (units are ppm)
exo_dict['planet']['type'] ='user' #tells pandexo you are uploading your own spectrum
exo_dict['planet']['exopath'] = 'wasp12b.txt'
exo_dict['planet']['w_unit'] = 'cm' #other options include "um","nm" ,"Angs", "sec" (for phase curves)
exo_dict['planet']['f_unit'] = 'rp^2/r*^2' #other options are 'fp/f*'
exo_dict['planet']['type'] = 'constant' #tells pandexo you want a fixed transit depth
exo_dict['planet']['f_unit'] = 'rp^2/r*^2' #this is what you would do for primary transit
#ORRRRR....
#if you wanted to instead to secondary transit at constant temperature
exo_dict['planet']['f_unit'] = 'fp/f*'
exo_dict['planet']['temp'] = 1000
exo_dict['planet']['type'] = 'grid' #tells pandexo you want to pull from the grid
exo_dict['planet']['temp'] = 1000 #grid: 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500
exo_dict['planet']['chem'] = 'noTiO' #options: 'noTiO' and 'eqchem', noTiO is chemical eq. without TiO
exo_dict['planet']['cloud'] = 'ray10' #options: nothing: '0',
#jdi.print_instruments()
result = jdi.run_pandexo(exo_dict,['NIRCam F322W2'])
inst_dict = jdi.load_mode_dict('NIRSpec G140H')
#loading in instrument dictionaries allow you to personalize some of
#the fields that are predefined in the templates. The templates have
#the subbarays with the lowest frame times and the readmodes with 1 frame per group.
#if that is not what you want. change these fields
#Try printing this out to get a feel for how it is structured:
print(inst_dict['configuration'])
#Another way to display this is to print out the keys
inst_dict.keys()
print("SUBARRAYS")
print(jdi.subarrays('nirspec'))
print("FILTERS")
print(jdi.filters('nircam'))
print("DISPERSERS")
print(jdi.dispersers('nirspec'))
#you can try personalizing some of these fields
inst_dict["configuration"]["detector"]["ngroup"] = 'optimize' #running "optimize" will select the maximum
#possible groups before saturation.
#You can also write in any integer between 2-65536
inst_dict["configuration"]["detector"]["subarray"] = 'substrip256' #change the subbaray
inst_dict['background'] = 'ecliptic'
inst_dict['background_level'] = 'high'
inst_dict = jdi.load_mode_dict('NIRISS SOSS')
inst_dict['strategy']['order'] = 2
inst_dict['configuration']['detector']['subarray'] = 'substrip256'
ngroup_from_order1_run = 2
inst_dict["configuration"]["detector"]["ngroup"] = ngroup_from_order1_run
jdi.print_instruments()
result = jdi.run_pandexo(exo_dict,['NIRCam F322W2'])
inst_dict = jdi.load_mode_dict('NIRSpec G395M')
#personalize subarray
inst_dict["configuration"]["detector"]["subarray"] = 'sub2048'
result = jdi.run_pandexo(exo_dict, inst_dict)
np.mean(result['FinalSpectrum']['spectrum_w_rand'])
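# Quick-look plot of the simulated spectrum -- a sketch assuming the standard
# PandExo output keys ('wave', 'spectrum_w_rand', 'error_w_floor'):
import matplotlib.pyplot as plt
wave = result['FinalSpectrum']['wave']
spec = result['FinalSpectrum']['spectrum_w_rand']
err = result['FinalSpectrum']['error_w_floor']
plt.errorbar(wave, spec, yerr=err, fmt='.')
plt.xlabel(r'Wavelength ($\mu$m)')
plt.ylabel(exo_dict['planet']['f_unit'])
plt.show()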
#choose select
result = jdi.run_pandexo(exo_dict,['NIRSpec G140M','NIRSpec G235M','NIRSpec G395M'],
output_file='three_nirspec_modes.p')
#run all
#result = jdi.run_pandexo(exo_dict, ['RUN ALL'], save_file = False)
#looping over different exoplanet models
jdi.run_pandexo(exo_dict, ['NIRCam F444W'], param_space = 'planet+exopath',
param_range = os.listdir('/path/to/location/of/models'),
output_path = '/path/to/output/simulations')
#looping over different stellar temperatures
jdi.run_pandexo(exo_dict, ['NIRCam F444W'], param_space = 'star+temp',
param_range = np.linspace(5000,8000,2),
output_path = '/path/to/output/simulations')
#looping over different saturation levels
jdi.run_pandexo(exo_dict, ['NIRCam F444W'], param_space = 'observation+sat_level',
param_range = np.linspace(.5,1,5),
output_path = '/path/to/output/simulations')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Exo Dict for Specific Planet
Step2: Edit exoplanet observation inputs
Step3: Edit exoplanet inputs using one of three options
Step4: 2) Users can also add in a constant temperature or a constant transit depth
Step5: 3) Select from grid
Step6: Load in instrument dictionary (OPTIONAL)
Step 2 is optional because PandExo has the functionality to automatically load in instrument dictionaries. Skip this if you plan on observing with one of the following and want to use the subarray with the smallest frame time and the readout mode with 1 frame/1 group (standard)
Step7: Don't know what instrument options there are?
Step8: Adjusting the Background Level
Step9: Running NIRISS SOSS Order 2
Step10: Running PandExo
Step11: Option 2- Run single instrument mode (with user dict), single planet
Step12: Option 3- Run several modes, single planet
Step13: Option 4- Run single mode, several planet cases
|
12,515
|
<ASSISTANT_TASK:>
Python Code:
import torch
@torch.jit.script
def activation_cell(cx):
return torch.tanh(cx)
# note: this captures the non-parameter jit.script function activation_cell from the enclosing scope at definition time! (You'll probably want to do this in a factory-style function, even if it's not 100% Pythonic.)
@torch.jit.script
def lstm_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
# type: (Tensor, Tuple[Tensor, Tensor], Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor]
hx, cx = hidden
gates = torch.mm(input, w_ih.t()) + torch.mm(hx, w_hh.t()) + b_ih + b_hh
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = activation_cell(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
print(lstm_cell.graph.pretty_print())
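# A quick smoke test (illustrative addition): the weights follow the standard
# LSTM convention, with the four gates stacked along dimension 0.
B, I, H = 3, 10, 20
inp = torch.randn(B, I)
hidden = (torch.randn(B, H), torch.randn(B, H))
w_ih = torch.randn(4 * H, I)
w_hh = torch.randn(4 * H, H)
b_ih = torch.randn(4 * H)
b_hh = torch.randn(4 * H)
hy, cy = lstm_cell(inp, hidden, w_ih, w_hh, b_ih, b_hh)
print(hy.shape, cy.shape)  # both torch.Size([3, 20])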
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Functional Interface for a Cell
|
12,516
|
<ASSISTANT_TASK:>
Python Code:
#import our modules
from __future__ import print_function
import fwdpy as fp
import numpy as np
import datetime
import time
#set up our sim
rng = fp.GSLrng(101)
nregions = [fp.Region(0,1,1),fp.Region(2,3,1)]
sregions = [fp.ExpS(1,2,1,-0.1),fp.ExpS(1,2,0.1,0.001)]
rregions = [fp.Region(0,3,1)]
popsizes = np.array([1000]*10000,dtype=np.uint32)
#Run the sim
pops = fp.evolve_regions(rng,4,1000,popsizes[0:],0.001,0.0001,0.001,nregions,sregions,rregions)
#Take samples from the simulation
samples = [fp.get_samples(rng,i,20) for i in pops]
for i in samples:
windows = []
start = 0
while start < 3:
##We will only look at neutral mutations, which are element 0 of each sampl
window = [j[0] for j in i[0] if (j[0] >=start and j[0] < start+0.1)]
windows.append(window)
start += 0.1
##We now have a full set of windows that we can do something with
    print(len(windows)) ##There should be 30, and many will be empty
from libsequence.windows import Windows
from libsequence.polytable import SimData
for i in samples:
##We need to convert our list of tuples
##into types that pylibseq/libsequence understand:
windows = Windows(SimData(i[0]),0.1,0.1,0,3)
##Now, you can analyze the windows, etc.
print(len(windows))
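# Illustrative follow-up (assumes this pylibseq version provides
# libsequence.summstats.PolySIM): per-window summary statistics.
from libsequence.summstats import PolySIM
for w in windows:
    if len(w) > 0:
        ps = PolySIM(w)
        print(ps.thetapi(), ps.tajimasd())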
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Calculating sliding windows
Step2: Using pylibseq
|
12,517
|
<ASSISTANT_TASK:>
Python Code:
# Import modules you might use
import numpy as np
# Some data, in a list
my_data = [12, 5, 17, 8, 9, 11, 21]
# Function for calulating the mean of some data
def mean(data):
# Initialize sum to zero
sum_x = 0.0
# Loop over data
for x in data:
# Add to sum
sum_x += x
# Divide by number of elements in list, and return
return sum_x / len(data)
sum_x = 0
# Loop over data
for x in my_data:
# Add to sum
sum_x += x
print(sum_x)
mean(my_data)
# Function for calulating the mean of some data
def mean(data):
# Call sum, then divide by the numner of elements
return sum(data)/len(data)
# Function for calculating variance of data
def var(data):
# Get mean of data from function above
x_bar = mean(data)
# Do sum of squares in one line
sum_squares = sum([(x - x_bar)**2 for x in data])
# Divide by n-1 and return
return sum_squares/(len(data)-1)
x = (45, 95, 100, 47, 92, 43)
y = (65, 73, 10, 82, 6, 23)
z = (56, 33, 110, 56, 86, 88)
datasets = (x,y,z)
datasets
means = []
for d in datasets:
means.append(mean(d))
means
list(map(mean, datasets))
np.mean(datasets, axis=1)
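# For comparison (illustrative addition): numpy's ddof=1 variance matches the
# hand-rolled var() above, which also divides by n-1.
np.var(datasets, axis=1, ddof=1)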
42 # Integer
0.002243 # Floating-point
5.0J # Imaginary
'foo'
"bar" # Several string types
s = """Multi-line
string"""
type(True)
not False
x = None
print(x)
15/4
(14 - 5) * 4
(34,90,56) # Tuple with three elements
(15,) # Tuple with one element
(12, 'foobar') # Mixed tuple
foo = (5,7,2,8,2,-1,0,4)
foo[0]
foo[2:5]
foo[:-2]
foo[1::2]
a = (1,2,3)
a[0] = 6
tuple('foobar')
# List with five elements
[90, 43.7, 56, 1, -4]
# Tuple with one element
[100]
# Empty list
[]
bar = [5,8,4,2,7,9,4,1]
bar[3] = -5
bar
bar * 3
[0]*10
(3,)*10
bar.extend(foo) # Adds foo to the end of bar (in-place)
bar
bar.append(5) # Appends 5 to the end of bar
bar
bar.insert(0, 4) # Inserts 4 at index 0
bar
bar.remove(7) # Removes the first occurrence of 7
bar
bar.remove(100) # Oops! Doesn’t exist
bar.pop(4) # Removes and returns indexed item
bar.reverse() # Reverses bar in place
bar
bar.sort() # Sorts bar in place
bar
bar.count(7) # Counts occurrences of 7 in bar
bar.index(7) # Returns index of first 7 in bar
my_dict = {'a':16, 'b':(4,5), 'foo':'''(noun) a term used as a universal substitute
for something real, especially when discussing technological ideas and
problems'''}
my_dict
my_dict['b']
len(my_dict)
# Checks to see if ‘a’ is in my_dict
'a' in my_dict
# Returns a copy of the dictionary
my_dict.copy()
# Returns key/value pairs as list
my_dict.items()
# Returns list of keys
my_dict.keys()
# Returns list of values
my_dict.values()
my_dict['c']
my_dict.get('c')
my_dict.get('c', -1)
my_dict.popitem()
# Empties dictionary
my_dict.clear()
my_dict
my_set = {4, 5, 5, 7, 8}
my_set
empty_set = set()
empty_set
empty_set.add(-5)
another_set = empty_set
another_set
my_set | another_set
my_set & another_set
my_set - {4}
bar
set(bar)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notice that, rather than using parentheses or brackets to enclose units of code (such as loops or conditional statements), python simply uses indentation. This relieves the programmer from worrying about a stray bracket causing her program to crash. Also, it forces programmers to code in neat blocks, making programs easier to read. So, for the following snippet of code
Step2: The first line initializes a variable to hold the sum, and the second initiates a loop, where each element in the data list is given the name x, and is used in the code that is indented below. The first line of subsequent code that is not indented signifies the end of the loop. It takes some getting used to, but works rather well.
Step3: Our specification of mean and var are by no means the most efficient implementations. Python provides some syntax and built-in functions to make things easier, and sometimes faster
Step4: In the new implementation of mean, we use the built-in function sum to reduce the function to a single line. Similarly, var employs a list comprehension syntax to make a more compact and efficient loop.
Step5: This can be done using a classical loop
Step6: Or, more succinctly using map
Step7: Similarly we did not have to code these functions to get means and variances; the numpy package that we imported at the beginning of the module has similar methods
Step9: Data Types and Data Structures
Step10: There are a handful of constants that exist in the built-in-namespace. Importantly, there are boolean values True and False
Step11: Either of these can be negated using not.
Step12: In addition, there is a None type that represents the absence of a value.
Step13: All the arithmetic operators are available in Python
Step14: Compatibility Corner
Step15: There are several Python data structures that are used to encapsulate several elements in a set or sequence.
Step16: Individual elements in a tuple can be accessed by indexing. This amounts to specifying the appropriate element index enclosed in square brackets following the tuple name
Step17: Notice that the index is zero-based, meaning that the first index is zero, rather than one (in contrast to R). So above, 5 retrieves the sixth item, not the fifth.
Step18: This retrieves the third, fourth and fifth (but not the sixth!) elements -- i.e., up to, but not including, the final index. One may also slice or index starting from the end of a sequence, by using negative indices
Step19: As you can see, this returns all elements except the final two.
Step20: The elements of a tuple, as defined above, are immutable. Therefore, Python takes offense if you try to change them
Step21: The TypeError is called an exception, which in this case indicates that you have tried to perform an action on a type that does not support it. We will learn about handling exceptions further along.
Step22: Why does this happen? Because in Python, strings are considered a sequence of characters.
Step23: Elements of a list can be arbitrarily substituted by assigning new values to the associated index
Step24: Operations on lists are somewhat unusual. For example, multiplying a list by an integer does not multiply each element by that integer, as you might expect, but rather
Step25: Which is simply three copies of the list, concatenated together. This is useful for generating lists with identical elements
Step26: (incidentally, this works with tuples as well)
Step27: Since lists are mutable, they retain several methods, some of which mutate the list. For example
Step28: Some methods, however, do not change the list
Step29: Dictionaries
Step30: Notice that a indexes an integer, b a tuple, and foo a string (now you know what foo means). Hence, a dictionary is a sort of associative array. Some languages refer to such a structure as a hash or key-value store.
Step31: Some useful dictionary methods are
Step32: When we try to index a value that does not exist, it raises a KeyError.
Step33: If we would rather not get the error, we can use the get method, which returns None if the value is not present.
Step34: Custom return values can be specified with a second argument.
Step35: It is easy to remove items from a dictionary.
Step36: Sets
Step37: We can also use the set constructor.
Step38: As we would expect, we can perform set operations.
Step39: The set function is useful for returning the unique elements of a data structure. For example, recall bar
|
12,518
|
<ASSISTANT_TASK:>
Python Code:
!rm mnist_data.mat && wget https://github.com/KordingLab/lab_teaching_2015/raw/master/session_2/mnist_data.mat
import numpy as np
from scipy.io import loadmat
mnist = loadmat('mnist_data.mat')
X = mnist['X']
y = mnist['y']
print("Size of X and y: ", X.shape, y.shape)
def sigmoid(z):
g = 1.0/(1.0 + np.exp(-z))
return g
def sigmoid_grad(z):
g = sigmoid(z)*(1 - sigmoid(z))
return g
def init_weight(L_in, L_out):
    """Randomly initialize the weights of a layer"""
epsilon_init = 0.12
W = np.random.rand(L_out, 1+L_in)*2*epsilon_init - epsilon_init
return W
def compute_cost_grad(Theta1, Theta2, X, y, lm=1.0):
    """Compute cost and gradient of NN parameters for one iteration"""
# initialize few parameters
n, m = X.shape
J = 0
Theta1_grad = np.zeros(Theta1.shape)
Theta2_grad = np.zeros(Theta2.shape)
D1 = np.zeros(Theta1.shape)
D2 = np.zeros(Theta2.shape)
# forward propagation
a1 = np.concatenate((np.ones((n,1)), X), axis=1)
z2 = a1.dot(Theta1.T)
a2 = sigmoid(z2)
a2 = np.concatenate((np.ones((n,1)), a2), axis=1)
z3 = a2.dot(Theta2.T)
a3 = sigmoid(z3)
h = a3
# tranform y to Y, sparse format
Y = np.zeros(h.shape)
for i in range(n):
Y[i, y[i]-1] = 1
# compute cost
J = -(1.0/n)*np.sum(np.sum(Y*np.log(h) + (1 - Y)*np.log(1 - h))) + \
(lm/(2.0*n))*(np.sum(np.sum(Theta1[:,1::]**2)) + np.sum(np.sum(Theta2[:,1::]**2)))
# back propagation
delta3 = a3 - Y
delta2 = delta3.dot(Theta2[:, 1::])*sigmoid_grad(z2)
    # average the accumulated gradients over the n examples, with L2 regularisation
    D1 = D1 + (delta2.T).dot(a1)
    Theta1_grad[:,0] = (1.0/n)*D1[:,0]
    Theta1_grad[:,1::] = (1.0/n)*D1[:,1::] + (lm/n)*Theta1[:,1::]
    D2 = D2 + (delta3.T).dot(a2)
    Theta2_grad[:,0] = (1.0/n)*D2[:,0]
    Theta2_grad[:,1::] = (1.0/n)*D2[:,1::] + (lm/n)*Theta2[:,1::]
return J, Theta1_grad, Theta2_grad
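def check_gradient(Theta1, Theta2, X, y, i=0, j=0, eps=1e-4):
    # Illustrative numerical gradient check (an addition, not part of the
    # original notebook): compare a central finite-difference slope against
    # the analytic back-propagation gradient for a single weight.
    T1p, T1m = Theta1.copy(), Theta1.copy()
    T1p[i, j] += eps
    T1m[i, j] -= eps
    Jp, _, _ = compute_cost_grad(T1p, Theta2, X, y)
    Jm, _, _ = compute_cost_grad(T1m, Theta2, X, y)
    _, grad1, _ = compute_cost_grad(Theta1, Theta2, X, y)
    return (Jp - Jm) / (2 * eps), grad1[i, j]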
def predict(Theta1, Theta2, X):
n, m = X.shape
h1 = sigmoid(np.concatenate((np.ones((n,1)), X), axis=1).dot(Theta1.T))
h2 = sigmoid(np.concatenate((np.ones((n,1)), h1), axis=1).dot(Theta2.T))
p = np.atleast_2d(np.argmax(h2, axis=1) + 1).T
return p
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
%matplotlib inline
# use simple gradient descent algorithm to get final Theta1 and Theta2
n_iter = 1000
epsilon = 0.3
Theta1 = init_weight(400, 25)
Theta2 = init_weight(25, 10)
J_all = []
for i in range(n_iter):
J, Theta1_grad, Theta2_grad = compute_cost_grad(Theta1, Theta2, X, y)
Theta1 = (Theta1 - epsilon*Theta1_grad)
Theta2 = (Theta2 - epsilon*Theta2_grad)
J_all.append(J)
# using Theta1, Theta2 from gradient descent
y_pred = predict(Theta1, Theta2, X)
print('Training accuracy = ', np.mean(y == y_pred)*100)
plt.plot(range(n_iter), J_all)
plt.xlabel('Iteration')
plt.ylabel('Cost (J)')
plt.title('Cost function over iteration')
plt.show()
plt.imshow(X[0].reshape(20, 20));
plt.imshow(Theta1[1, 1:].reshape(20, 20));
plt.imshow(Theta1[5, 1:].reshape(20, 20));
plt.colorbar();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Back propagation algorithm
Step4: Work flow for computing final $\Theta_1, \Theta_2$
Step5: Analyzing weights
|
12,519
|
<ASSISTANT_TASK:>
Python Code:
def multiplek(y_test, topk):
multik = []
for i, score in enumerate(y_test):
if y_test[i] in topk[i]:
multik.append(y_test[i])
else:
multik.append(topk[i][0])
return multik
pred = grid.predict(X_test.ravel()) # predicts a category
p_prob = grid.predict_proba(X_test.ravel()) # predicts the probabilities of each category
idxs = np.argsort(p_prob[0])[::-1] # sorts probabilities in descending order
print(grid.best_estimator_.named_steps["lr"].classes_[idxs][:3])
def topk_scored(model, step_name, X_test, k):
results = []
p_prob = model.predict_proba(X_test)
for pred in p_prob:
idxs = np.argsort(pred)[::-1]
results.append(model.best_estimator_.named_steps[step_name].classes_[idxs][:k])
return results
pps = topk_scored(grid,"lr", X_test.ravel(), 3)
def scoring_multiplek(y_test, topk):
result = 0
l = len(y_test)
for i, score in enumerate(y_test):
if y_test[i] in topk[i]:
result += 1
return result/l
adjusted_score = scoring_multiplek(y_test.values, pps)
adjusted_score
X_test[:3]
y_test[:3]
pps[:3]
# Dataframe to review 3-category predictions
cont = list(zip(X_test,y_test.values, pps))
cols = ["Input", "Input Category", "Predicted Categories"]
results_df = pd.DataFrame(cont ,columns=cols)
results_df.head()
results_df.tail()
sc = scoring_multiplek(y_test.values, pps)
sc
x = "new on twitter - shots of the keynote speakers, the panel members and the audiences reactions, as well as while people network. posed are fine but candid photos are preferred. the photographer will need to check in on the first floor with building security and say s/he is visiting twitter. security will send s/he to the 9th floor where they will check in with reception."
example = grid.predict_proba(x)
ex_idxs = np.argsort(example[0])[::-1]
print(grid.best_estimator_.named_steps["lr"].classes_[ex_idxs][:3])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Predicting top 3 categories
Step2: The function below brings all the steps above so it can be applied to the X_test set.
Step3: The function below marks the result as positive if the correct category is within topk.
Step4: By selecting the top 3 categories, the <u>accuracy score</u> using Logistic Regression goes from 64% to 86%.
Step5: Implementation of Top 3 category recomendations
|
12,520
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
import numpy
import scipy.stats
import matplotlib.pyplot as pyplot
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# seed the random number generator so we all get the same results
numpy.random.seed(18)
# some nicer colors from http://colorbrewer2.org/
COLOR1 = '#7fc97f'
COLOR2 = '#beaed4'
COLOR3 = '#fdc086'
COLOR4 = '#ffff99'
COLOR5 = '#386cb0'
%matplotlib inline
weight = scipy.stats.lognorm(0.23, 0, 70.8)
weight.mean(), weight.std()
xs = numpy.linspace(20, 160, 100)
ys = weight.pdf(xs)
pyplot.plot(xs, ys, linewidth=4, color=COLOR1)
pyplot.xlabel('weight (kg)')
pyplot.ylabel('PDF')
None
def make_sample(n=100):
sample = weight.rvs(n)
return sample
sample = make_sample(n=100)
sample.mean(), sample.std()
def sample_stat(sample):
return sample.mean()
def compute_sampling_distribution(n=100, iters=1000):
stats = [sample_stat(make_sample(n)) for i in range(iters)]
return numpy.array(stats)
sample_means = compute_sampling_distribution(n=100, iters=1000)
pyplot.hist(sample_means, color=COLOR5)
pyplot.xlabel('sample mean (n=100)')
pyplot.ylabel('count')
None
sample_means.mean()
std_err = sample_means.std()
std_err
conf_int = numpy.percentile(sample_means, [5, 95])
conf_int
def plot_sampling_distribution(n, xlim=None):
    """Plot the sampling distribution.

    n: sample size
    xlim: [xmin, xmax] range for the x axis
    """
sample_stats = compute_sampling_distribution(n, iters=1000)
se = numpy.std(sample_stats)
ci = numpy.percentile(sample_stats, [5, 95])
pyplot.hist(sample_stats, color=COLOR2)
pyplot.xlabel('sample statistic')
pyplot.xlim(xlim)
text(0.03, 0.95, 'CI [%0.2f %0.2f]' % tuple(ci))
text(0.03, 0.85, 'SE %0.2f' % se)
pyplot.show()
def text(x, y, s):
    """Plot a string at a given location in axis coordinates.

    x: coordinate
    y: coordinate
    s: string
    """
ax = pyplot.gca()
pyplot.text(x, y, s,
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes)
plot_sampling_distribution(100)
def sample_stat(sample):
return sample.mean()
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(plot_sampling_distribution, n=slider, xlim=fixed([55, 95]))
None
def sample_stat(sample):
# TODO: replace the following line with another sample statistic
return sample.mean()
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(plot_sampling_distribution, n=slider, xlim=fixed([0, 100]))
None
class Resampler(object):
    """Represents a framework for computing sampling distributions."""
def __init__(self, sample, xlim=None):
        """Stores the actual sample."""
self.sample = sample
self.n = len(sample)
self.xlim = xlim
def resample(self):
        """Generates a new sample by choosing from the original
        sample with replacement.
        """
new_sample = numpy.random.choice(self.sample, self.n, replace=True)
return new_sample
def sample_stat(self, sample):
        """Computes a sample statistic using the original sample or a
        simulated sample.
        """
return sample.mean()
def compute_sampling_distribution(self, iters=1000):
        """Simulates many experiments and collects the resulting sample
        statistics.
        """
stats = [self.sample_stat(self.resample()) for i in range(iters)]
return numpy.array(stats)
def plot_sampling_distribution(self):
        """Plots the sampling distribution."""
sample_stats = self.compute_sampling_distribution()
se = sample_stats.std()
ci = numpy.percentile(sample_stats, [5, 95])
pyplot.hist(sample_stats, color=COLOR2)
pyplot.xlabel('sample statistic')
pyplot.xlim(self.xlim)
text(0.03, 0.95, 'CI [%0.2f %0.2f]' % tuple(ci))
text(0.03, 0.85, 'SE %0.2f' % se)
pyplot.show()
def interact_func(n, xlim):
sample = weight.rvs(n)
resampler = Resampler(sample, xlim=xlim)
resampler.plot_sampling_distribution()
interact_func(n=100, xlim=[50, 100])
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(interact_func, n=slider, xlim=fixed([50, 100]))
None
# Solution goes here
def interact_func2(n, xlim):
sample = weight.rvs(n)
resampler = StdResampler(sample, xlim=xlim)
resampler.plot_sampling_distribution()
interact_func2(n=100, xlim=[0, 100])
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(interact_func2, n=slider, xlim=fixed([0, 100]))
None
female_weight = scipy.stats.lognorm(0.23, 0, 70.8)
female_weight.mean(), female_weight.std()
male_weight = scipy.stats.lognorm(0.20, 0, 87.3)
male_weight.mean(), male_weight.std()
female_sample = female_weight.rvs(100)
male_sample = male_weight.rvs(100)
male_sample.mean() - female_sample.mean()
def CohenEffectSize(group1, group2):
    """Compute Cohen's d.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / numpy.sqrt(pooled_var)
return d
CohenEffectSize(male_sample, female_sample)
class CohenResampler(Resampler):
def __init__(self, group1, group2, xlim=None):
self.group1 = group1
self.group2 = group2
self.xlim = xlim
def resample(self):
n, m = len(self.group1), len(self.group2)
group1 = numpy.random.choice(self.group1, n, replace=True)
group2 = numpy.random.choice(self.group2, m, replace=True)
return group1, group2
def sample_stat(self, groups):
group1, group2 = groups
return CohenEffectSize(group1, group2)
resampler = CohenResampler(male_sample, female_sample)
resampler.plot_sampling_distribution()
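# Illustrative summary (not in the original): a 90% confidence interval for
# Cohen's d from the same sampling distribution.
d_stats = resampler.compute_sampling_distribution()
print(numpy.percentile(d_stats, [5, 95]))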
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part One
Step2: Here's what that distribution looks like
Step3: make_sample draws a random sample from this distribution. The result is a NumPy array.
Step4: Here's an example with n=100. The mean and std of the sample are close to the mean and std of the population, but not exact.
Step5: We want to estimate the average weight in the population, so the "sample statistic" we'll use is the mean
Step6: One iteration of "the experiment" is to collect a sample of 100 women and compute their average weight.
Step7: The next line runs the simulation 1000 times and puts the results in sample_means.
Step8: Let's look at the distribution of the sample means. This distribution shows how much the results vary from one experiment to the next.
Step9: The mean of the sample means is close to the actual population mean, which is nice, but not actually the important part.
Step10: The standard deviation of the sample means quantifies the variability from one experiment to the next, and reflects the precision of the estimate.
Step11: We can also use the distribution of sample means to compute a "90% confidence interval", which contains 90% of the experimental results
Step14: Now we'd like to see what happens as we vary the sample size, n. The following function takes n, runs 1000 simulated experiments, and summarizes the results.
Step15: Here's a test run with n=100
Step16: Now we can use interact to run plot_sampling_distribution with different values of n. Note
Step17: Other sample statistics
Step24: STOP HERE
Step25: The following function instantiates a Resampler and runs it.
Step26: Here's a test run with n=100
Step27: Now we can use interact_func in an interaction
Step28: Exercise 2
Step29: Test your code using the cell below
Step30: When your StdResampler is working, you should be able to interact with it
Step31: STOP HERE
Step32: And here's the men's distribution
Step33: I'll simulate a sample of 100 men and 100 women
Step34: The difference in means should be about 17 kg, but will vary from one random sample to the next
Step36: Here's the function that computes Cohen's effect size again
Step37: The difference in weight between men and women is about 1 standard deviation
Step38: Now we can write a version of the Resampler that computes the sampling distribution of $d$.
Step39: Now we can instantiate a CohenResampler and plot the sampling distribution.
|
12,521
|
<ASSISTANT_TASK:>
Python Code:
%%bash
git --help
%%bash
git clone https://github.com/cosmoscalibur/herramientas_computacionales.git herramientas
%%bash
cd herramientas
ls -oha
%%bash
cd herramientas
git remote -v
%%bash
cd herramientas
git init
%%bash
cd herramientas
git remote add pruebas https://github.com/cosmoscalibur/pruebas.git
git remote -v
%%bash
cd herramientas
git remote rm origin
git remote -v
%%bash
cd herramientas
echo "linea 1" > probar_1
printf "s\n 5" > probar_2
%%bash
cd herramientas
git status
%%bash
cd herramientas
git add . # with "." everything is added
git commit -m "Agregados archivos de prueba 1 y 2."
%%bash
cd herramientas
echo "linea 2" >> probar_1
printf "otro\narchivo\npara probar." > probar_3
git add probar_1 probar_3
git commit -m "Actualizado prueba 1 y agregado prueba 3."
%%bash
cd herramientas
git push pruebas master
%%bash
cd herramientas
git log --oneline
%%bash
cd herramientas
git show d193ae6
%%bash
cd herramientas
echo "Actual"
ls
git checkout e4a4be5
echo "Anterior"
ls
git checkout master
%%bash
cd herramientas
echo "Actual"
cat probar_1
git checkout d193ae6 probar_1
echo "Anterior"
cat probar_1
git checkout HEAD probar_1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configuration
Step2: This creates a copy of the git repository herramientas_computacionales in the ./herramientas directory. If no directory is given, a new directory is created at the current location.
Step3: We can see that after cloning, the default remote repositories are configured.
Step4: For a new repository, or to reinitialize one (to accept new templates or after moving the directory), we must create the .git directory and add the items we already had in our directory. Whenever files are added (or any update is made to files and directories), a commit is recommended to record the action in our history.
Step5: Remote repositories
Step6: It is also possible to remove access to a remote server with git remote rm.
Step7: Objects and references
Step8: Although in this case it is clear that our files have not been added to the repository, we will check the status of the files against their record in the repository.
Step9: Now we add the files of our local repository to the index.
Step10: We will modify one of the files and add it directly.
Step11: It is possible to tell git directly to delete files with
Step12: During the push we will be asked for the username and password of our account on the git service. Using the default https protocol we get 3600 seconds after each authentication; alternatively we can use ssh and create an authentication key, giving permanent access on our private machine without repeated authentication.
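A hedged sketch of the SSH alternative mentioned above (the key type, passphrase, and email are placeholder assumptions):
%%bash
cd herramientas
ssh-keygen -t ed25519 -C "you@example.com" -N "" -f ~/.ssh/id_ed25519
git remote set-url pruebas git@github.com:cosmoscalibur/pruebas.git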
|
12,522
|
<ASSISTANT_TASK:>
Python Code::
import cv2
%matplotlib notebook
%matplotlib inline
from matplotlib import pyplot as plt
img = cv2.imread("hsv_ball.jpg",cv2.IMREAD_GRAYSCALE)
_,mask = cv2.threshold(img, 220,255,cv2.THRESH_BINARY_INV)
titles = ['images',"mask"]
images = [img,mask]
for i in range(2):
plt.subplot(1,2,i+1)
plt.imshow(images[i],"gray")
plt.title(titles[i])
plt.show()
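# Illustrative follow-up (not in the original): apply the mask to keep only
# the pixels below the 220 threshold in the source image.
masked = cv2.bitwise_and(img, img, mask=mask)
plt.imshow(masked, "gray")
plt.show()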
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the image in grayscale, apply an inverse binary threshold at 220 to build a mask, and display the image and the resulting mask side by side.
|
12,523
|
<ASSISTANT_TASK:>
Python Code:
import math
import time
import diffrax
import equinox as eqx # https://github.com/patrick-kidger/equinox
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
import jax.scipy as jsp
import matplotlib
import matplotlib.pyplot as plt
import optax # https://github.com/deepmind/optax
matplotlib.rcParams.update({"font.size": 30})
class Func(eqx.Module):
mlp: eqx.nn.MLP
data_size: int
hidden_size: int
def __init__(self, data_size, hidden_size, width_size, depth, *, key, **kwargs):
super().__init__(**kwargs)
self.data_size = data_size
self.hidden_size = hidden_size
self.mlp = eqx.nn.MLP(
in_size=hidden_size,
out_size=hidden_size * data_size,
width_size=width_size,
depth=depth,
activation=jnn.softplus,
# Note the use of a tanh final activation function. This is important to
# stop the model blowing up. (Just like how GRUs and LSTMs constrain the
# rate of change of their hidden states.)
final_activation=jnn.tanh,
key=key,
)
def __call__(self, t, y, args):
return self.mlp(y).reshape(self.hidden_size, self.data_size)
class NeuralCDE(eqx.Module):
initial: eqx.nn.MLP
func: Func
linear: eqx.nn.Linear
def __init__(self, data_size, hidden_size, width_size, depth, *, key, **kwargs):
super().__init__(**kwargs)
ikey, fkey, lkey = jrandom.split(key, 3)
self.initial = eqx.nn.MLP(data_size, hidden_size, width_size, depth, key=ikey)
self.func = Func(data_size, hidden_size, width_size, depth, key=fkey)
self.linear = eqx.nn.Linear(hidden_size, 1, key=lkey)
def __call__(self, ts, coeffs, evolving_out=False):
# Each sample of data consists of some timestamps `ts`, and some `coeffs`
# parameterising a control path. These are used to produce a continuous-time
# input path `control`.
control = diffrax.CubicInterpolation(ts, coeffs)
term = diffrax.ControlTerm(self.func, control).to_ode()
solver = diffrax.Tsit5()
dt0 = None
y0 = self.initial(control.evaluate(ts[0]))
if evolving_out:
saveat = diffrax.SaveAt(ts=ts)
else:
saveat = diffrax.SaveAt(t1=True)
solution = diffrax.diffeqsolve(
term,
solver,
ts[0],
ts[-1],
dt0,
y0,
stepsize_controller=diffrax.PIDController(rtol=1e-3, atol=1e-6),
saveat=saveat,
)
if evolving_out:
prediction = jax.vmap(lambda y: jnn.sigmoid(self.linear(y))[0])(solution.ys)
else:
(prediction,) = jnn.sigmoid(self.linear(solution.ys[-1]))
return prediction
def get_data(dataset_size, add_noise, *, key):
theta_key, noise_key = jrandom.split(key, 2)
length = 100
theta = jrandom.uniform(theta_key, (dataset_size,), minval=0, maxval=2 * math.pi)
y0 = jnp.stack([jnp.cos(theta), jnp.sin(theta)], axis=-1)
ts = jnp.broadcast_to(jnp.linspace(0, 4 * math.pi, length), (dataset_size, length))
matrix = jnp.array([[-0.3, 2], [-2, -0.3]])
ys = jax.vmap(
lambda y0i, ti: jax.vmap(lambda tij: jsp.linalg.expm(tij * matrix) @ y0i)(ti)
)(y0, ts)
ys = jnp.concatenate([ts[:, :, None], ys], axis=-1) # time is a channel
ys = ys.at[: dataset_size // 2, :, 1].multiply(-1)
if add_noise:
ys = ys + jrandom.normal(noise_key, ys.shape) * 0.1
coeffs = jax.vmap(diffrax.backward_hermite_coefficients)(ts, ys)
labels = jnp.zeros((dataset_size,))
labels = labels.at[: dataset_size // 2].set(1.0)
_, _, data_size = ys.shape
return ts, coeffs, labels, data_size
def dataloader(arrays, batch_size, *, key):
dataset_size = arrays[0].shape[0]
assert all(array.shape[0] == dataset_size for array in arrays)
indices = jnp.arange(dataset_size)
while True:
perm = jrandom.permutation(key, indices)
(key,) = jrandom.split(key, 1)
start = 0
end = batch_size
while end < dataset_size:
batch_perm = perm[start:end]
yield tuple(array[batch_perm] for array in arrays)
start = end
end = start + batch_size
def main(
dataset_size=256,
add_noise=False,
batch_size=32,
lr=1e-2,
steps=20,
hidden_size=8,
width_size=128,
depth=1,
seed=5678,
):
key = jrandom.PRNGKey(seed)
train_data_key, test_data_key, model_key, loader_key = jrandom.split(key, 4)
ts, coeffs, labels, data_size = get_data(
dataset_size, add_noise, key=train_data_key
)
model = NeuralCDE(data_size, hidden_size, width_size, depth, key=model_key)
# Training loop like normal.
@eqx.filter_jit
def loss(model, ti, label_i, coeff_i):
pred = jax.vmap(model)(ti, coeff_i)
# Binary cross-entropy
bxe = label_i * jnp.log(pred) + (1 - label_i) * jnp.log(1 - pred)
bxe = -jnp.mean(bxe)
acc = jnp.mean((pred > 0.5) == (label_i == 1))
return bxe, acc
grad_loss = eqx.filter_value_and_grad(loss, has_aux=True)
@eqx.filter_jit
def make_step(model, data_i, opt_state):
ti, label_i, *coeff_i = data_i
(bxe, acc), grads = grad_loss(model, ti, label_i, coeff_i)
updates, opt_state = optim.update(grads, opt_state)
model = eqx.apply_updates(model, updates)
return bxe, acc, model, opt_state
optim = optax.adam(lr)
opt_state = optim.init(eqx.filter(model, eqx.is_inexact_array))
for step, data_i in zip(
range(steps), dataloader((ts, labels) + coeffs, batch_size, key=loader_key)
):
start = time.time()
bxe, acc, model, opt_state = make_step(model, data_i, opt_state)
end = time.time()
print(
f"Step: {step}, Loss: {bxe}, Accuracy: {acc}, Computation time: "
f"{end - start}"
)
ts, coeffs, labels, _ = get_data(dataset_size, add_noise, key=test_data_key)
bxe, acc = loss(model, ts, labels, coeffs)
print(f"Test loss: {bxe}, Test Accuracy: {acc}")
# Plot results
sample_ts = ts[-1]
sample_coeffs = tuple(c[-1] for c in coeffs)
pred = model(sample_ts, sample_coeffs, evolving_out=True)
interp = diffrax.CubicInterpolation(sample_ts, sample_coeffs)
values = jax.vmap(interp.evaluate)(sample_ts)
fig = plt.figure(figsize=(16, 8))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2, projection="3d")
ax1.plot(sample_ts, values[:, 1], c="dodgerblue")
ax1.plot(sample_ts, values[:, 2], c="dodgerblue", label="Data")
ax1.plot(sample_ts, pred, c="crimson", label="Classification")
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_xlabel("t")
ax1.legend()
ax2.plot(values[:, 1], values[:, 2], c="dodgerblue", label="Data")
ax2.plot(values[:, 1], values[:, 2], pred, c="crimson", label="Classification")
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_zticks([])
ax2.set_xlabel("x")
ax2.set_ylabel("y")
ax2.set_zlabel("Classification")
plt.tight_layout()
plt.savefig("neural_cde.png")
plt.show()
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First let's define the vector field for the CDE.
Step2: Now wrap up the whole CDE solve into a model.
Step3: Toy dataset of spirals.
Step4: The main entry point. Try running main() to train the neural CDE.
|
12,524
|
<ASSISTANT_TASK:>
Python Code:
import sys
import pandas as pd # check out Modin https://towardsdatascience.com/get-faster-pandas-with-modin-even-on-your-laptops-b527a2eeda74
import numpy as np
import json
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import datetime
# Add path to APS modules
aps_pth = Path('.').absolute()
print(aps_pth)
if str(aps_pth) not in sys.path:
    sys.path.append(str(aps_pth))
sns.set(style="white")
#from sklearn.preprocessing import LabelEncoder
#from pprint import pprint
#pd.set_option("display.max_rows",6)
%matplotlib inline
# analysis of entire data set - collected using varsomdata2.varsomscripts.avalanchewarningscomplete.get_season_17_18()
#data_pth = Path(r'.\aps\data\varsom\norwegian_avalanche_warnings_season_17_18.csv')
data_pth = Path(r'D:\Dev\APS\aps\data\varsom\norwegian_avalanche_warnings_season_16_19.csv')
#varsom_df = pd.read_csv(aps_pth / data_pth, index_col=0)
varsom_df = pd.read_csv(data_pth, index_col=0)
varsom_df.head()
varsom_df.columns.values
varsom_df[varsom_df['region_id']==3012].filter(['avalanche_problem_1_cause_id', 'avalanche_problem_1_cause_name',
'avalanche_problem_1_destructive_size_ext_id',
'avalanche_problem_1_destructive_size_ext_name',
'avalanche_problem_1_distribution_id',
'avalanche_problem_1_distribution_name',
'avalanche_problem_1_exposed_height_1',
'avalanche_problem_1_exposed_height_2',
'avalanche_problem_1_exposed_height_fill',
'avalanche_problem_1_ext_id', 'avalanche_problem_1_ext_name',
'avalanche_problem_1_probability_id',
'avalanche_problem_1_probability_name',
'avalanche_problem_1_problem_id',
'avalanche_problem_1_problem_type_id',
'avalanche_problem_1_problem_type_name',
'avalanche_problem_1_trigger_simple_id',
'avalanche_problem_1_trigger_simple_name',]).head(10)
# for col in varsom_df.columns.values:
# print(f'{col}: {varsom_df[col].unique()} \n')
# Find the amount of NaN values in each column
print(varsom_df.isnull().sum().sort_values(ascending=False))
varsom_df['mountain_weather_wind_speed'] = varsom_df['mountain_weather_wind_speed'].fillna('None')
varsom_df['mountain_weather_wind_direction'] = varsom_df['mountain_weather_wind_direction'].fillna('None')
print(varsom_df.isnull().sum().sort_values(ascending=False))
varsom_df['date'] = pd.to_datetime(varsom_df['date_valid'], infer_datetime_format=True)
def add_prevday_features(df):
### danger level
df['danger_level_prev1day'] = df['danger_level'].shift(1)
df['danger_level_name_prev1day'] = df['danger_level_name'].shift(1)
df['danger_level_prev2day'] = df['danger_level'].shift(2)
df['danger_level_name_prev2day'] = df['danger_level_name'].shift(2)
df['danger_level_prev3day'] = df['danger_level'].shift(3)
df['danger_level_name_prev3day'] = df['danger_level_name'].shift(3)
### avalanche problem
df['avalanche_problem_1_cause_id_prev1day'] = df['avalanche_problem_1_cause_id'].shift(1)
df['avalanche_problem_1_problem_type_id_prev1day'] = df['avalanche_problem_1_problem_type_id'].shift(1)
df['avalanche_problem_1_cause_id_prev2day'] = df['avalanche_problem_1_cause_id'].shift(2)
df['avalanche_problem_1_problem_type_id_prev2day'] = df['avalanche_problem_1_problem_type_id'].shift(2)
df['avalanche_problem_1_cause_id_prev3day'] = df['avalanche_problem_1_cause_id'].shift(3)
df['avalanche_problem_1_problem_type_id_prev3day'] = df['avalanche_problem_1_problem_type_id'].shift(3)
df['avalanche_problem_2_cause_id_prev1day'] = df['avalanche_problem_2_cause_id'].shift(1)
df['avalanche_problem_2_problem_type_id_prev1day'] = df['avalanche_problem_2_problem_type_id'].shift(1)
df['avalanche_problem_2_cause_id_prev2day'] = df['avalanche_problem_2_cause_id'].shift(2)
df['avalanche_problem_2_problem_type_id_prev2day'] = df['avalanche_problem_2_problem_type_id'].shift(2)
df['avalanche_problem_2_cause_id_prev3day'] = df['avalanche_problem_2_cause_id'].shift(3)
df['avalanche_problem_2_problem_type_id_prev3day'] = df['avalanche_problem_2_problem_type_id'].shift(3)
### weather
df['mountain_weather_temperature_max_prev1day'] = df['mountain_weather_temperature_max'].shift(1)
df['mountain_weather_temperature_max_prev2day'] = df['mountain_weather_temperature_max'].shift(2)
df['mountain_weather_temperature_max_prev3day'] = df['mountain_weather_temperature_max'].shift(3)
df['mountain_weather_temperature_min_prev1day'] = df['mountain_weather_temperature_min'].shift(1)
df['mountain_weather_temperature_min_prev2day'] = df['mountain_weather_temperature_min'].shift(2)
df['mountain_weather_temperature_min_prev3day'] = df['mountain_weather_temperature_min'].shift(3)
df['mountain_weather_precip_region_prev1day'] = df['mountain_weather_precip_region'].shift(1)
df['mountain_weather_precip_most_exposed_prev1day'] = df['mountain_weather_precip_most_exposed'].shift(1)
df['mountain_weather_precip_region_prev3daysum'] = df['mountain_weather_precip_region'].shift(1) + df['mountain_weather_precip_region'].shift(2) + df['mountain_weather_precip_region'].shift(3)
return df
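# Minimal illustration (an assumption, not from the original notebook) of how
# shift() builds the lag features above: shifting by 1 moves yesterday's value
# onto today's row, leaving NaN in the first row.
_demo = pd.DataFrame({'danger_level': [1, 2, 3]})
_demo['danger_level_prev1day'] = _demo['danger_level'].shift(1)
print(_demo)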
varsom_df[(varsom_df['date']>=datetime.date(year=2016, month=12, day=1)) & (varsom_df['date']<datetime.date(year=2017, month=6, day=1))]
# grouping by region and season
grouped_df = pd.DataFrame()
for id in varsom_df['region_id'].unique():
#for id in [3003, 3011, 3014, 3028]:
_tmp_df = varsom_df[varsom_df['region_id']==id].copy()
    _tmp_df = _tmp_df.sort_values(by='valid_from')  # assign the result; sort_values is not in-place
start, stop = int(_tmp_df['date_valid'].min()[:4]), int(_tmp_df['date_valid'].max()[:4])
    for yr in range(start, stop-1):
        # NOTE: the result of this seasonal filter is never assigned, so the
        # loop currently has no effect; the lag features below are built on
        # the full per-region frame.
        _tmp_df[(_tmp_df['date']>=datetime.date(year=yr, month=12, day=1)) & (_tmp_df['date']<datetime.date(year=yr+1, month=6, day=1))]
_tmp_df = add_prevday_features(_tmp_df)
#print(len(_tmp_df), _tmp_df['region_id'].unique())
if grouped_df.empty:
print('empty')
grouped_df = _tmp_df.copy()
else:
grouped_df = pd.concat([grouped_df, _tmp_df], ignore_index=True).copy()
#print('g', len(grouped_df), grouped_df['region_id'].unique())
grouped_df.filter(['valid_from', 'region_name', 'region_id', 'avalanche_problem_1_problem_type_id', 'avalanche_problem_1_problem_type_id_prev2day'])
varsom_df = grouped_df.copy()
#from aps.notebooks.ml_varsom.regroup_forecast import regroup
from regroup_forecast import regroup
varsom_df = regroup(varsom_df)
# Check if sensitivity transformation worked...
print(varsom_df['avalanche_problem_1_sensitivity_id_class'].value_counts())
varsom_df.filter(['mountain_weather_precip_region', 'mountain_weather_precip_region_prev3daysum']).head(12)
varsom_df[varsom_df['region_id']==3012].filter(['region_id', 'danger_level', 'danger_level_prev1day']).head(40)
def get_aval_problem_combined(type_, dist_, sens_, size_):
return int("{0}{1}{2}{3}".format(type_, dist_, sens_, size_))
def print_aval_problem_combined(aval_combined_int):
aval_combined_str = str(aval_combined_int)
#with open(aps_pth / r'aps/config/snoskred_keys.json') as jdata:
with open(r'D:\Dev\APS\aps\config\snoskred_keys.json') as jdata:
snoskred_keys = json.load(jdata)
type_ = snoskred_keys["Class_AvalancheProblemTypeName"][aval_combined_str[0]]
dist_ = snoskred_keys["Class_AvalDistributionName"][aval_combined_str[1]]
sens_ = snoskred_keys["Class_AvalSensitivityId"][aval_combined_str[2]]
size_ = snoskred_keys["DestructiveSizeId"][aval_combined_str[3]]
return f"{type_}:{dist_}:{sens_}:{size_}"
print(print_aval_problem_combined(6221))
varsom_df['aval_problem_1_combined'] = varsom_df.apply(lambda row: get_aval_problem_combined(row['avalanche_problem_1_problem_type_id_class'],
row['avalanche_problem_1_distribution_id'],
row['avalanche_problem_1_sensitivity_id_class'], #avalanche_problem_1_trigger_simple_id_class / avalanche_problem_1_sensitivity_id_class
row['avalanche_problem_1_destructive_size_ext_id']), axis=1)
aval_uni = varsom_df['aval_problem_1_combined'].unique()
print(aval_uni, len(aval_uni))
print(varsom_df['aval_problem_1_combined'].value_counts())
print(varsom_df['avalanche_problem_1_problem_type_id_class'].value_counts())
# hot encode
hot_encode_ = ['emergency_warning', 'author', 'mountain_weather_wind_direction']
varsom_df = pd.get_dummies(varsom_df, columns=hot_encode_)
# Check that there are no weird or missing values.
for col in varsom_df.columns.values:
print(f'{col}: {varsom_df[col].unique()} \n')
del_list = [
'utm_zone',
'utm_east',
'utm_north',
'danger_level_name',
'avalanche_problem_1_exposed_height_fill',
'avalanche_problem_2_exposed_height_fill',
'avalanche_problem_3_exposed_height_fill',
'avalanche_problem_1_valid_expositions',
'avalanche_problem_2_valid_expositions',
'avalanche_problem_3_valid_expositions',
'avalanche_problem_1_cause_name',
'avalanche_problem_1_problem_type_name',
'avalanche_problem_1_destructive_size_ext_name',
'avalanche_problem_1_distribution_name',
'avalanche_problem_1_ext_name',
'avalanche_problem_1_probability_name',
'avalanche_problem_1_trigger_simple_name',
'avalanche_problem_1_type_name',
'avalanche_problem_2_cause_name',
'avalanche_problem_2_problem_type_name',
'avalanche_problem_2_destructive_size_ext_name',
'avalanche_problem_2_distribution_name',
'avalanche_problem_2_ext_name',
'avalanche_problem_2_probability_name',
'avalanche_problem_2_trigger_simple_name',
'avalanche_problem_2_type_name',
'avalanche_problem_3_cause_name',
'avalanche_problem_3_problem_type_name',
'avalanche_problem_3_destructive_size_ext_name',
'avalanche_problem_3_distribution_name',
'avalanche_problem_3_ext_name',
'avalanche_problem_3_probability_name',
'avalanche_problem_3_trigger_simple_name',
'avalanche_problem_3_type_name',
'latest_avalanche_activity',
'main_text',
'snow_surface',
'current_weak_layers',
'avalanche_danger',
'avalanche_problem_1_advice',
'avalanche_problem_2_advice',
'avalanche_problem_3_advice',
'mountain_weather_wind_speed',
'region_type_name',
'region_name',
'reg_id',
'valid_from',
'valid_to'
]
removed_ = [varsom_df.pop(v) for v in del_list]
removed_
fill_list = [
'mountain_weather_freezing_level',
'mountain_weather_precip_region',
'mountain_weather_precip_region_prev1day',
'mountain_weather_precip_region_prev3daysum',
'mountain_weather_precip_most_exposed',
'mountain_weather_precip_most_exposed_prev1day',
'mountain_weather_temperature_min',
'mountain_weather_temperature_max',
'mountain_weather_temperature_elevation',
'danger_level_prev3day',
'avalanche_problem_1_problem_type_id_prev3day',
'avalanche_problem_2_problem_type_id_prev3day',
'avalanche_problem_2_cause_id_prev3day',
'avalanche_problem_1_cause_id_prev3day',
'danger_level_prev2day',
'avalanche_problem_1_cause_id_prev2day',
'avalanche_problem_1_problem_type_id_prev2day',
'avalanche_problem_2_cause_id_prev2day',
'avalanche_problem_2_problem_type_id_prev2day',
'avalanche_problem_2_cause_id_prev1day',
'avalanche_problem_2_problem_type_id_prev1day',
'avalanche_problem_1_problem_type_id_prev1day',
'avalanche_problem_1_cause_id_prev1day',
'danger_level_prev1day'
]
filled_ = [varsom_df[v].fillna(0., inplace=True) for v in fill_list]
filled_
del_list = [
'danger_level_name_prev1day', 'danger_level_name_prev2day', 'danger_level_name_prev3day',
'mountain_weather_change_wind_direction',
'mountain_weather_change_hour_of_day_start',
'mountain_weather_change_hour_of_day_stop',
'mountain_weather_change_wind_speed',
'mountain_weather_fl_hour_of_day_stop',
'mountain_weather_fl_hour_of_day_start',
'latest_observations', 'publish_time', 'date_valid',
'mountain_weather_temperature_max_prev3day', 'mountain_weather_temperature_min_prev3day',
'mountain_weather_temperature_max_prev2day',
'mountain_weather_temperature_min_prev2day',
'mountain_weather_temperature_max_prev1day',
'mountain_weather_temperature_min_prev1day'
]
removed_ = [varsom_df.pop(v) for v in del_list]
# Find the amount of NaN values in each column
print(varsom_df.isnull().sum().sort_values(ascending=False))
# Compute the correlation matrix - works only on numerical variables.
corr = varsom_df.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool is deprecated; use the builtin bool
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 11))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(1000, 15, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.8, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
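# Illustrative helper (an assumption, not in the original notebook): list
# column pairs whose absolute correlation exceeds a threshold, as candidates
# for removal before fitting correlation-sensitive models.
_high = (corr.abs() > 0.9).stack()
_pairs = [(a, b) for (a, b), v in _high.items() if v and a < b]
print(_pairs[:10])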
#corr['avalanche_problem_1_cause_id'].sort_values(ascending=False)
#corr
#sns.pairplot(varsom_df.drop(['date_valid'], axis=1))
# Get all numerical features
num_feat = varsom_df._get_numeric_data().columns
num_feat
# let's see the details about the remaining variables
varsom_df.describe()
varsom_df.to_csv('varsom_ml_preproc_3y.csv', index_label='index')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check if there are missing values.
Step2: Fill missing values where necessary.
Step3: Feature engineering
Step4: Add historical values, e.g. yesterdays precipitation
Step5: Combine avalanche problem attributes into single parameter
Step6: Hot encode categorical variables where necessary.
Step7: Check that there are no weird or missing values.
Step8: Remove variables we know we do not need. In this case mainly because they are redundant like the avalanche_problem_1_ext_name and avalanche_problem_1_ext_id - in this case we only keep the numeric id variable.
Step9: Fill missing values where necessary
Step10: Eventually remove variables with many missing values.
Step11: Check again if there are still values missing...
Step12: We can see that some parameters are highly correlated. These are mainly the parameters belonging to the same avalanche problem. Depending on the ML algorithm we use we have to remove some of them.
Step13: Save data for further analysis
|
12,525
|
<ASSISTANT_TASK:>
Python Code:
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
root = logging.getLogger()
root.addHandler(logging.StreamHandler())
%matplotlib inline
# download from Google Drive: https://drive.google.com/open?id=0B9cazFzBtPuCOFNiUHYwcVFVODQ
# Representative example with multiple polygons in the shapefile, and a lot of point-records (also outside rangemaps)
from iSDM.species import IUCNSpecies
salmo_trutta = IUCNSpecies(name_species='Salmo trutta')
salmo_trutta.load_shapefile("../data/fish/selection/salmo_trutta")
rasterized = salmo_trutta.rasterize(raster_file="./salmo_trutta_full.tif", pixel_size=0.5, all_touched=True)
plt.figure(figsize=(25,20))
plt.imshow(rasterized, cmap="hot", interpolation="none")
from iSDM.environment import RasterEnvironmentalLayer
biomes_adf = RasterEnvironmentalLayer(file_path="../data/rebioms/w001001.adf", name_layer="Biomes")
biomes_adf.load_data()
biomes_adf.plot()
from iSDM.environment import ContinentsLayer
from iSDM.environment import Source
continents = ContinentsLayer(file_path="../data/continents/continent.shp", source=Source.ARCGIS)
continents.load_data()
fig, ax = plt.subplots(1,1, figsize=(30,20))
continents.data_full.plot(column="continent", colormap="hsv", ax=ax)
continents_rasters = continents.rasterize(raster_file="../data/continents/continents_raster.tif", pixel_size=0.5, all_touched=True)
continents_rasters.shape # stacked raster with 8 bands, one for each continent.
selected_layers, pseudo_absences = biomes_adf.sample_pseudo_absences(species_raster_data=rasterized, continents_raster_data=continents_rasters, number_of_pseudopoints=1000)
plt.figure(figsize=(25,20))
plt.imshow(selected_layers, cmap="hot", interpolation="none")
plt.figure(figsize=(25,20))
plt.imshow(pseudo_absences, cmap="hot", interpolation="none")
all_coordinates = biomes_adf.pixel_to_world_coordinates(raster_data=np.zeros_like(rasterized), filter_no_data_value=False)
all_coordinates
base_dataframe = pd.DataFrame([all_coordinates[0], all_coordinates[1]]).T
base_dataframe.columns=['decimallatitude', 'decimallongitude']
base_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
base_dataframe.head()
base_dataframe.tail()
presence_coordinates = salmo_trutta.pixel_to_world_coordinates()
presence_coordinates
presences_dataframe = pd.DataFrame([presence_coordinates[0], presence_coordinates[1]]).T
presences_dataframe.columns=['decimallatitude', 'decimallongitude']
presences_dataframe[salmo_trutta.name_species] = 1 # fill presences with 1's
presences_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
presences_dataframe.head()
presences_dataframe.tail()
pseudo_absence_coordinates = biomes_adf.pixel_to_world_coordinates(raster_data=pseudo_absences)
pseudo_absences_dataframe = pd.DataFrame([pseudo_absence_coordinates[0], pseudo_absence_coordinates[1]]).T
pseudo_absences_dataframe.columns=['decimallatitude', 'decimallongitude']
pseudo_absences_dataframe[salmo_trutta.name_species] = 0
pseudo_absences_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
pseudo_absences_dataframe.head()
pseudo_absences_dataframe.tail()
from iSDM.environment import ClimateLayer
water_min_layer = ClimateLayer(file_path="../data/watertemp/min_wt_2000.tif")
water_min_reader = water_min_layer.load_data()
# HERE: should we ignore cells with no-data values for temperature? They are set to a really big negative number
# for now we keep them, otherwise could be NaN
water_min_coordinates = water_min_layer.pixel_to_world_coordinates(filter_no_data_value=False)
water_min_coordinates
mintemp_dataframe = pd.DataFrame([water_min_coordinates[0], water_min_coordinates[1]]).T
mintemp_dataframe.columns=['decimallatitude', 'decimallongitude']
water_min_matrix = water_min_reader.read(1)
mintemp_dataframe['MinT'] = water_min_matrix.reshape(np.product(water_min_matrix.shape))
mintemp_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
mintemp_dataframe.head()
mintemp_dataframe.tail()
water_max_layer = ClimateLayer(file_path="../data/watertemp/max_wt_2000.tif")
water_max_reader = water_max_layer.load_data()
# HERE: should we ignore cells with no-data values for temperature? They are set to a really big negative number
# for now we keep them, otherwise could be NaN
water_max_coordinates = water_max_layer.pixel_to_world_coordinates(filter_no_data_value=False)
maxtemp_dataframe = pd.DataFrame([water_max_coordinates[0], water_max_coordinates[1]]).T
maxtemp_dataframe.columns=['decimallatitude', 'decimallongitude']
water_max_matrix = water_max_reader.read(1)
maxtemp_dataframe['MaxT'] = water_max_matrix.reshape(np.product(water_max_matrix.shape))
maxtemp_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
maxtemp_dataframe.head()
maxtemp_dataframe.tail()
water_mean_layer = ClimateLayer(file_path="../data/watertemp/mean_wt_2000.tif")
water_mean_reader = water_mean_layer.load_data()
# HERE: should we ignore cells with no-data values for temperature? They are set to a really big negative number
# for now we keep them, otherwise could be NaN
water_mean_coordinates = water_mean_layer.pixel_to_world_coordinates(filter_no_data_value=False)
meantemp_dataframe = pd.DataFrame([water_mean_coordinates[0], water_mean_coordinates[1]]).T
meantemp_dataframe.columns=['decimallatitude', 'decimallongitude']
water_mean_matrix = water_mean_reader.read(1)
meantemp_dataframe['MeanT'] = water_mean_matrix.reshape(np.product(water_mean_matrix.shape))
meantemp_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
meantemp_dataframe.head()
meantemp_dataframe.tail()
# merge base with presences
merged = base_dataframe.combine_first(presences_dataframe)
merged.head()
merged.tail()
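# Toy illustration (an assumption, not part of the original analysis) of
# combine_first semantics: values from the left frame win, and its NaNs are
# filled from the right frame, aligned on the index.
_left = pd.DataFrame({'a': [1.0, np.nan]})
_right = pd.DataFrame({'a': [9.0, 2.0]})
print(_left.combine_first(_right))  # -> column a: [1.0, 2.0]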
# merge based+presences with pseudo-absences
# merged2 = pd.merge(merged1, pseudo_absences_dataframe, on=["decimallatitude", "decimallongitude", salmo_trutta.name_species], how="outer")
merged = merged.combine_first(pseudo_absences_dataframe)
merged.head()
merged.tail()
# merge base+presences+pseudo-absences with min temperature
#merged3 = pd.merge(merged2, mintemp_dataframe, on=["decimallatitude", "decimallongitude"], how="outer")
merged = merged.combine_first(mintemp_dataframe)
merged.head()
merged.tail()
# merged4 = pd.merge(merged3, maxtemp_dataframe, on=["decimallatitude", "decimallongitude"], how="outer")
merged = merged.combine_first(maxtemp_dataframe)
merged.head()
merged.tail()
# merged5 = pd.merge(merged4, meantemp_dataframe, on=["decimallatitude", "decimallongitude"], how="outer")
merged = merged.combine_first(meantemp_dataframe)
merged.tail()
merged.to_csv("../data/fish/selection/salmo_trutta_again.csv")
merged[merged['Salmo trutta']==0].shape[0] # should be equal to number of pseudo absences below
pseudo_absence_coordinates[0].shape[0]
merged[merged['Salmo trutta']==1].shape[0] # should be equal to number of presences below
presence_coordinates[0].shape[0]
merged[merged['Salmo trutta'].isnull()].shape[0] # all that's left
360 * 720 == merged[merged['Salmo trutta']==0].shape[0] + merged[merged['Salmo trutta']==1].shape[0] + merged[merged['Salmo trutta'].isnull()].shape[0]
# == all pixels in 360 x 720 matrix
merged[merged['Salmo trutta']==0.0]
pseudo_absences_dataframe
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Choose a representative species for a case study
Step2: 2. Rasterize the species, to get a matrix of pixels
Step3: 2.1 Plot to get an idea
Step4: 3. Load the biogeographical regons raster layer
Step5: 3.1 Plot to get an idea
Step6: 3.2 Load the continents vector layer (for further clipping of pseudo-absence area), rasterize
Step7: 4. Sample pseudo-absence pixels, taking into account all the distinct biomes that fall in the species region.
Step8: 4.1 Plot the biomes taken into account for sampling pseudo-absences, to get an idea
Step9: 4.2 Plot the sampled pseudo-absences, to get an idea
Step10: 5. Construct a convenient dataframe for testing with different SDM models
Step11: 5.2 Get arrays of coordinates (latitude/longitude) for each cell (middle point) in a presences pixel map
Step12: 5.3 Get arrays of coordinates (latitude/longitude) for each cell (middle point) in a pseudo_absences pixel map
Step13: 5.4 Get arrays of coordinates (latitude/longitude) for each cell (middle point) in a minimum temperature pixel map
Step14: 5.5 Get arrays of coordinates (latitude/longitude) for each cell (middle point) in a maximum temperature pixel map
Step15: 5.6 Get arrays of coordinates (latitude/longitude) for each cell (middle point) in a mean temperature pixel map
Step16: http
|
12,526
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
!python -m pip install iree-compiler iree-runtime iree-tools-tf -f https://github.com/google/iree/releases
#@title Imports
import os
import tempfile
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
from iree import runtime as ireert
from iree.tf.support import module_utils
from iree.compiler import compile_str
from iree.compiler import tf as tfc
#@title Setup Artifacts Directory
# Used in the low-level compilation section.
ARTIFACTS_DIR = os.path.join(tempfile.gettempdir(), "iree", "colab_artifacts")
os.makedirs(ARTIFACTS_DIR, exist_ok=True)
#@title Define the EdgeDetectionModule
class EdgeDetectionModule(tf.Module):
@tf.function(input_signature=[tf.TensorSpec([1, 128, 128, 1], tf.float32)])
def edge_detect_sobel_operator(self, image):
# https://en.wikipedia.org/wiki/Sobel_operator
sobel_x = tf.constant([[-1.0, 0.0, 1.0],
[-2.0, 0.0, 2.0],
[-1.0, 0.0, 1.0]],
dtype=tf.float32, shape=[3, 3, 1, 1])
sobel_y = tf.constant([[ 1.0, 2.0, 1.0],
[ 0.0, 0.0, 0.0],
[-1.0, -2.0, -1.0]],
dtype=tf.float32, shape=[3, 3, 1, 1])
gx = tf.nn.conv2d(image, sobel_x, 1, "SAME")
gy = tf.nn.conv2d(image, sobel_y, 1, "SAME")
return tf.math.sqrt(gx * gx + gy * gy)
tf_module = EdgeDetectionModule()
#@title Load a test image of a [labrador](https://commons.wikimedia.org/wiki/File:YellowLabradorLooking_new.jpg) and run the module with TF
def load_image(path_to_image):
image = tf.io.read_file(path_to_image)
image = tf.image.decode_image(image, channels=1)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.resize(image, (128, 128))
image = image[tf.newaxis, :]
return image
content_path = tf.keras.utils.get_file(
'YellowLabradorLooking_new.jpg',
'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg')
image = load_image(content_path).numpy()
def show_images(image, edges):
fig, axs = plt.subplots(1, 2)
axs[0].imshow(image.reshape(128, 128), cmap="gray")
axs[0].set_title("Input image")
axs[1].imshow(edges.reshape(128, 128), cmap="gray")
axs[1].set_title("Output image")
axs[0].axis("off")
axs[1].axis("off")
fig.tight_layout()
fig.show()
# Invoke the function with the image as an argument
tf_edges = tf_module.edge_detect_sobel_operator(image).numpy()
# Plot the input and output images
show_images(image, tf_edges)
#@markdown ### Backend Configuration
backend_choice = "iree_vmvx (CPU)" #@param [ "iree_vmvx (CPU)", "iree_llvmaot (CPU)", "iree_vulkan (GPU/SwiftShader)" ]
backend_choice = backend_choice.split(" ")[0]
backend = module_utils.BackendInfo(backend_choice)
#@title Compile and Run the EdgeDetectionModule with IREE.
module = backend.compile_from_class(EdgeDetectionModule)
# Compute the edges using the compiled module and display the result.
iree_edges = module.edge_detect_sobel_operator(image)
show_images(image, iree_edges)
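# Sanity check (an illustrative addition): the IREE result should closely
# match the TensorFlow reference computed earlier.
print("max abs difference (TF vs IREE):", np.abs(tf_edges - iree_edges).max())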
#@title Construct a module containing the edge detection function
# Do *not* further compile to a bytecode module for a particular backend.
#
# By stopping at mhlo in text format, we can more easily take advantage of
# future compiler improvements within IREE and can use iree_bytecode_module to
# compile and bundle the module into a sample application. For a production
# application, we would probably want to freeze the version of IREE used and
# compile as completely as possible ahead of time, then use some other scheme
# to load the module into the application at runtime.
compiler_module = tfc.compile_module(EdgeDetectionModule(), import_only=True)
print("Edge Detection MLIR: ", compiler_module.decode('utf-8'))
edge_detection_mlir_path = os.path.join(ARTIFACTS_DIR, "edge_detection.mlir")
with open(edge_detection_mlir_path, "wt") as output_file:
output_file.write(compiler_module.decode('utf-8'))
print(f"Wrote MLIR to path '{edge_detection_mlir_path}'")
#@title Compile and prepare to test the edge detection module
flatbuffer_blob = compile_str(compiler_module, target_backends=["vmvx"], input_type="mhlo")
vm_module = ireert.VmModule.from_flatbuffer(flatbuffer_blob)
# Register the module with a runtime context.
config = ireert.Config(backend.driver)
ctx = ireert.SystemContext(config=config)
ctx.add_vm_module(vm_module)
edge_detect_sobel_operator_f = ctx.modules.module["edge_detect_sobel_operator"]
low_level_iree_edges = edge_detect_sobel_operator_f(image)
show_images(image, low_level_iree_edges)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image edge detection module
Step2: High Level Compilation With IREE
Step3: Low-Level Compilation
|
12,527
|
<ASSISTANT_TASK:>
Python Code:
import featuretools as ft
es = ft.demo.load_mock_customer(return_entityset=True)
feature_defs = ft.dfs(entityset=es,
target_dataframe_name="customers",
agg_primitives=["mean", "sum", "mode", "n_most_common"],
trans_primitives=["month", "hour"],
max_depth=2,
features_only=True)
feature_defs[9]
ft.describe_feature(feature_defs[9])
feature_defs[14]
ft.describe_feature(feature_defs[14])
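# Illustrative addition (not from the original guide): descriptions can be
# generated in bulk for a quick overview of the feature set.
for f in feature_defs[:5]:
    print(ft.describe_feature(f))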
feature_descriptions = {'customers: join_date': 'the date the customer joined'}
ft.describe_feature(feature_defs[9], feature_descriptions=feature_descriptions)
join_date_column_schema = es['customers'].ww.columns['join_date']
join_date_column_schema.description = 'the date the customer joined'
es['customers'].ww.columns['join_date'].description
feature = ft.TransformFeature(es['customers'].ww['join_date'], ft.primitives.Hour)
feature
ft.describe_feature(feature)
feature_descriptions = {
'sessions: SUM(transactions.amount)': 'the total transaction amount for a session'}
feature_defs[14]
ft.describe_feature(feature_defs[14], feature_descriptions=feature_descriptions)
primitive_templates = {'sum': 'the total of {}'}
feature_defs[6]
ft.describe_feature(feature_defs[6], primitive_templates=primitive_templates)
feature = feature_defs[5]
feature
primitive_templates = {
'n_most_common': [
'the 3 most common elements of {}', # generic multi-output feature
'the {nth_slice} most common element of {}']} # template for each slice
ft.describe_feature(feature, primitive_templates=primitive_templates)
ft.describe_feature(feature[0], primitive_templates=primitive_templates)
ft.describe_feature(feature[1], primitive_templates=primitive_templates)
ft.describe_feature(feature[2], primitive_templates=primitive_templates)
primitive_templates = {
'n_most_common': [
'the 3 most common elements of {}',
'the most common element of {}',
'the second most common element of {}',
'the third most common element of {}']}
ft.describe_feature(feature, primitive_templates=primitive_templates)
ft.describe_feature(feature[0], primitive_templates=primitive_templates)
ft.describe_feature(feature[1], primitive_templates=primitive_templates)
ft.describe_feature(feature[2], primitive_templates=primitive_templates)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: By default, describe_feature uses the existing column and DataFrame names and the default primitive description templates to generate feature descriptions.
Step2: Improving Descriptions
Step3: For example, the above replaces the column name, "join_date", with a more descriptive definition of what that column represents in the dataset. Descriptions can also be set directly on a column in a DataFrame by going through the Woodwork typing information to access the description attribute present on each ColumnSchema
Step4: Descriptions must be set for a column in a DataFrame before the feature is created in order for descriptions to propagate. Note that if a description is both set directly on a column and passed to describe_feature with feature_descriptions, the description in the feature_descriptions parameter will take precedence.
Step5: Here, we create and pass in a custom description of the intermediate feature SUM(transactions.amount). The description for MEAN(sessions.SUM(transactions.amount)), which is built on top of SUM(transactions.amount), uses the custom description in place of the automatically generated one. Feature descriptions can be passed in as a dictionary that maps the custom descriptions to either the feature object itself or the unique feature name in the form "[dataframe_name]
Step6: In this example, we override the default template of 'the sum of {}' with our custom template 'the total of {}'. The description uses our custom template instead of the default.
Step7: Notice how the multi-output feature uses the first template for its description. Each slice of this feature will use the second slice template
Step8: Alternatively, instead of supplying a single template for all slices, templates can be provided for each slice to further customize the output. Note that in this case, each slice must get its own template.
|
12,528
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib as mpl
import plotnine as p9
import matplotlib.pyplot as plt
import itertools
import warnings
warnings.simplefilter("ignore")
from sklearn import neighbors, preprocessing, impute, metrics, model_selection, linear_model, svm, feature_selection
from matplotlib.pyplot import rcParams
rcParams['figure.figsize'] = 6,6
def train_bank_to_xy(bank):
    """standardize and impute training"""
bank_sel = bank[['age','balance','duration','y']].values
X,y = bank_sel[:,:-1], bank_sel[:,-1]
scaler = preprocessing.StandardScaler().fit(X)
imputer = impute.SimpleImputer(fill_value=0).fit(X)
trans_prep = lambda Z: imputer.transform(scaler.transform(Z))
X = trans_prep(X)
y = 2*(y == 'yes')-1
return (X, y), trans_prep
def test_bank_to_xy(bank, trans_prep):
    """standardize and impute test"""
bank_sel = bank[['age','balance','duration','y']].values
X,y = bank_sel[:,:-1], bank_sel[:,-1]
X = trans_prep(X)
y = 2*(y == 'yes')-1
return (X, y)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
    """This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
bank = pd.read_csv('../../data/bank.csv',sep=';',na_values=['unknown',999,'nonexistent'])
bank.info()
bank_tr, bank_te = model_selection.train_test_split(bank,test_size=.33)
p9.ggplot(bank_tr, p9.aes(x = 'age',fill = 'y')) + p9.geom_density(alpha=.2)
(X_tr, y_tr), trans_prep = train_bank_to_xy(bank_tr)
X_te, y_te = test_bank_to_xy(bank_te, trans_prep)
def plot_conf_score(y_te,score,tau):
y_pred = 2*(score > tau) - 1
classes = [1,-1]
conf = metrics.confusion_matrix(y_te, y_pred,labels=classes)
plot_confusion_matrix(conf, classes)
score_dur = X_te[:,2]
p9.ggplot(bank_tr[['duration','y']].dropna(axis=0)) + p9.aes(x = 'duration',fill = 'y')\
+ p9.geom_density(alpha=.5)
plot_conf_score(y_te,score_dur,1.)
plot_conf_score(y_te,score_dur,2.)
## Fit and find NNs
nn = neighbors.NearestNeighbors(n_neighbors=10,metric="l2")
nn.fit(X_tr)
dists, NNs = nn.kneighbors(X_te)
NNs[1], y_tr[NNs[1]].mean(), y_te[1]
score_nn = np.array([(y_tr[knns] == 1).mean() for knns in NNs])
plot_conf_score(y_te,score_nn,.2)
nn = neighbors.KNeighborsClassifier(n_neighbors=10)
nn.fit(X_tr, y_tr)
score_nn = nn.predict_proba(X_te)[:,1]
plot_conf_score(y_te,score_nn,.2)
def print_top_k(score_dur,y_te,k_top):
ordering = np.argsort(score_dur)[::-1]
print("k: score, y")
for k, (yv,s) in enumerate(zip(y_te[ordering],score_dur[ordering])):
print("{}: {}, {}".format(k,s,yv))
if k >= k_top - 1:
break
print_top_k(score_dur,y_te,10)
plt.style.use('ggplot')
fpr_dur, tpr_dur, threshs = metrics.roc_curve(y_te,score_dur)
plt.figure(figsize=(6,6))
plt.plot(fpr_dur,tpr_dur)
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title("ROC for 'duration'")
def plot_temp():
plt.figure(figsize=(6,6))
plt.plot(fpr_dur,tpr_dur,label='duration')
plt.plot(fpr_nn,tpr_nn,label='knn')
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend()
plt.title("ROC")
fpr_nn, tpr_nn, threshs = metrics.roc_curve(y_te,score_nn)
plot_temp()
def plot_temp():
plt.figure(figsize=(6,6))
plt.plot(rec_dur,prec_dur,label='duration')
plt.plot(rec_nn,prec_nn,label='knn')
plt.xlabel('recall')
plt.ylabel('precision')
plt.legend()
plt.title("PR curve")
prec_dur, rec_dur, threshs = metrics.precision_recall_curve(y_te,score_dur)
prec_nn, rec_nn, threshs = metrics.precision_recall_curve(y_te,score_nn)
plot_temp()
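# Average precision summarizes each PR curve as a scalar (illustrative addition):
print("AP duration:", metrics.average_precision_score(y_te, score_dur))
print("AP knn:", metrics.average_precision_score(y_te, score_nn))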
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Classification 1
Step4: Evaluating a classifier
Step5: Confusion matrix and metrics
|
12,529
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
from operator import itemgetter
from collections import Counter
import scipy.stats as stat
from gensim.models import Word2Vec
from nltk.corpus import stopwords
import FastGaussianLDA2
wvmodel = Word2Vec.load_word2vec_format(
"/Users/michael/Documents/Gaussian_LDA-master/data/glove.wiki/glove.6B.50d.txt", binary=False)
print "word-vector dimension: {}".format(wvmodel.vector_size())
wv_vocab = set(wvmodel.vocab.keys())
stops = set(stopwords.words(fileids="english"))  # imported directly so the name `corpus` below is not shadowed
corpus = []
nips_path = "/Users/michael/Documents/GaussianLDA/data/"
for folder in os.listdir(nips_path)[1:]:
for doc in os.listdir(nips_path + folder):
with open(nips_path + folder + "/" + doc, 'r') as f:
txt = f.read().split()
txt = map(lambda x: x.lower(), txt) # Lowercasing each word
            txt = filter(lambda word: all(ord(letter) < 128 for letter in word), txt) # Keeping only words made entirely of ASCII characters
txt = filter(lambda x: x not in stops, txt) # Removing stop words
txt = filter(lambda x: x.isalpha(), txt) # Removing non-letter words (eg numbers and symbols)
txt = filter(lambda x: len(x) > 2, txt) # removing super short words and single letters
txt = filter(lambda x: x in wv_vocab, txt)
txt = ' '.join(txt)
corpus.append(txt)
print "Number of documents in corpus: {}".format(len(corpus))
reload(FastGaussianLDA2)
topics = 50
dim = 50
run_num = 1
outputfile = "/Users/michael/Documents/GaussianLDA/output/NIPS_{}_{}T_{}D_".format(str(run_num),
str(topics),
str(dim))
lda = FastGaussianLDA2.Gauss_LDA(topics, corpus, word_vector_model=wvmodel, alpha=.5, outputfile=outputfile)
lda.fit(50) # Number of samples to run
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the word_vector model with GenSim
Step2: Sets of vocab to filter on
Step3: Document cleaning
|
12,530
|
<ASSISTANT_TASK:>
Python Code:
class Pessoa(object):
def __init__(self, nome, idade):
self.nome = nome
self.idade = idade
joao = Pessoa()  # raises TypeError: __init__() requires the nome and idade arguments
joao = Pessoa('João', 20)
print(joao, '\n')
print(joao.nome)
print(joao.idade)
joao.nome = 'João Pedro'
print(joao.nome)
maria = Pessoa('Maria', 20)
print(maria)
print(maria.nome)
print(maria.idade)
class Pessoa(object):
def __init__(self, nome, idade):
self.nome = nome
self.__idade = idade
pedro = Pessoa('Pedro', 30)
print(pedro.nome)
print(pedro.__idade)  # raises AttributeError: name mangling hides __idade outside the class
class Pessoa(object):
def __init__(self, nome, idade):
self.nome = nome
self.__idade = idade
@property
def idade(self):
return self.__idade
@idade.setter
def idade(self, nova_idade):
self.__idade = nova_idade
pedro = Pessoa('Pedro', 30)
print(pedro.nome)
pedro.idade = 50
print(pedro.idade)
class Pessoa(object):
def __init__(self, nome, idade):
self.nome = nome
self.__idade = idade
@property
def idade(self):
return self.__idade
@idade.setter
def idade(self, nova_idade):
self.__idade = nova_idade
class Atleta(Pessoa):
def __init__(self, nome, idade, peso):
Pessoa.__init__(self, nome, idade)
self.peso = peso
self.aposentado = False
def aquecer(self):
print("Atleta Aquecido")
def aposentar(self):
self.aposentado = True
jose = Atleta('Jose', 30, 80)
print(jose.aposentado)
jose.aposentar()
print(jose.aposentado)
jose.aquecer()
jose.nome = 'Zé'
print(jose.nome, jose.idade, jose.peso)
class Corredor(Atleta):
def correr(self):
print("Corredor correndo")
class Nadador(Atleta):
def nadar(self):
print("Nadador nadando")
class Ciclista(Atleta):
def pedalar(self):
print("Ciclista pedalando")
pedro = Ciclista('Pedro', 30, 100)
print(dir(pedro))
class Triatleta(Corredor, Nadador, Ciclista):
def triatlo(self):
print("Iniciar prova")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Encapsulation
Step2: This way we can neither retrieve nor change the value of this private variable. To do so we need to build two methods, one to set the value (setter) and another to retrieve it (getter).
Step3: Inheritance
|
12,531
|
<ASSISTANT_TASK:>
Python Code:
# I import useful libraries (with functions) so I can visualize my data
# I use Pandas because this dataset has word/string column titles and I like the readability features of commands and finish visual products that Pandas offers
import pandas as pd
import matplotlib.pyplot as plt
import re
import numpy as np
%matplotlib inline
#I want to be able to easily scroll through this notebook so I limit the length of the appearance of my dataframes
from pandas import set_option
set_option('display.max_rows', 10)
#I import a pitch data file
#comment by nick: changed the path the data is loaded from, making it compatible with clone copies of your project
pitch=pd.read_table('../Data/pitches.csv', sep=',')
#assigning columns names
#pitch.columns=[['date_time','section','note','freq1','freq2','freq3', 'freq4', 'freq5', 'freq6', 'freq7', 'freq8', 'freq9']]
#I display my dataframe
pitch
output = pitch['freq7'].dtype
output
#Test to see if data is a float
def test_data_type(data):
    '''Check to see if a column contains only floats'''
    obs = data.dtype  # dtype of the column that was passed in
    #print(obs)
    exp = 'float64'
    assert obs == exp, 'Data is not a float'
    return
test_data_type(pitch['freq7'])
#Tell python that my time column holds "datetime" values, so it won't be read as a string or object
pitch['time']= pd.to_datetime(pitch['time'])
#print the new table and the type of data to check that all columns are in line with the column names
print(pitch)
#Check the type of data in each column. This shows there are integers and floats, and datetime. This is good for analysing.
pitch.dtypes
#Calculate MEDIAN of pitch values for each time stamp
#pitch['median_freq'] = [[1,2,3,4,5,] axis=0]
#pitch['median_freq'] = np.median(pitch[3,9], axis=1, 'freq1' 'freq2' 'freq3', 'freq4', 'freq5', 'freq6', 'freq7', 'freq8', 'freq9')
#median_freq = pd.pitches(index=median_freq)
#pitch['median_freq'] = Series(np.random.randn(sLength), index=df1.index)
#pitch['median_freq'] = np.median(pitch[['freq1','freq2','freq3', 'freq4', 'freq5', 'freq6', 'freq7', 'freq8', 'freq9']], axis=1)
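# A working row-wise median (an assumption about what the commented attempts above were aiming for):
pitch['median_freq'] = np.median(pitch[['freq1','freq2','freq3', 'freq4', 'freq5', 'freq6', 'freq7', 'freq8', 'freq9']], axis=1)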
#Calculate the mean of the frequency values and add another column to the dataframe
pitch['mean_freq'] = np.mean(pitch[['freq1','freq2','freq3', 'freq4', 'freq5', 'freq6', 'freq7', 'freq8', 'freq9']], axis=1)
#Calculate the standard deviation for the mean value of all frequency measurements
pitch['stdev_freq'] = np.std(pitch['mean_freq'])
#Now my data frame has the median, mean and standard deviation for frequency
pitch
#Group by section to compare to environmental measurements from the choir division
#selecting pitch values for the choir division, which is closest to the CO2 sensor
organized_pitch = pitch.groupby(['div']).get_group('choir')
# "organized_pitch" is only a set of pointers to the "pitch" dataframe
organized_pitch
#Save this data frame as a file which can be called into the plotting script
organized_pitch.to_csv('pitch.csv', sep=',') #columns='time', 'div', 'note', 'freq1', 'freq2')
def make_plot(data):
plt.figure(figsize=(8,5))
fig = plt.plot(organized_pitch['time'], organized_pitch['mean_freq'], color = 'navy')
plt.title('Pitch of C5 Pipe Organ Note')
plt.ylabel('Sound Frequency (Hz)')
plt.xlabel('Time of Sample Taken (Apr. 13, 16 and 17, 2010)')
plt.show()
return(fig)
make_plot(organized_pitch)
#We can see that pitch changes from ~ 523.9-524.7 Hz
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Uploaded data into Python
Step2: Next
Step3: These data points can be plotted on top of a calculated pitch line
Step4: I now have an input (raw file) and output (section-selected measured pitch dataframe). This output can be called into my next script, the "env_data.py" file, for comparision between measured and calculated pitch (calculated pitch is calculated from environmental data).
|
12,532
|
<ASSISTANT_TASK:>
Python Code:
# NBVAL_IGNORE_OUTPUT
import numpy as np
import matplotlib.pyplot as plot
import math as mt
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm
# NBVAL_IGNORE_OUTPUT
%matplotlib inline
from examples.seismic import TimeAxis
from examples.seismic import RickerSource
from examples.seismic import Receiver
from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator
nptx = 101
nptz = 101
x0 = 0.
x1 = 1000.
compx = x1-x0
z0 = 0.
z1 = 1000.
compz = z1-z0;
hx = (x1-x0)/(nptx-1)
hz = (z1-z0)/(nptz-1)
origin = (x0,z0)
extent = (compx,compz)
shape = (nptx,nptz)
spacing = (hx,hz)
class d0domain(SubDomain):
name = 'd0'
def define(self, dimensions):
x, z = dimensions
        return {x: x, z: z}
d0_domain = d0domain()
grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain))
v0 = np.zeros((nptx,nptz))
p0 = 0
p1 = int((1/2)*nptz)
p2 = nptz
v0[0:nptx,p0:p1] = 1.5
v0[0:nptx,p1:p2] = 2.5
def graph2dvel(vel):
plot.figure()
plot.figure(figsize=(16,8))
fscale = 1/10**(3)
scale = np.amax(vel)
extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]
fig = plot.imshow(np.transpose(vel), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.title('Velocity Profile')
plot.grid()
ax = plot.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plot.colorbar(fig, cax=cax, format='%.2e')
cbar.set_label('Velocity [km/s]')
plot.show()
# NBVAL_IGNORE_OUTPUT
graph2dvel(v0)
t0 = 0.
tn = 1000.
CFL = 0.4
vmax = np.amax(v0)
dtmax = np.float64((min(hx,hz)*CFL)/(vmax))
ntmax = int((tn-t0)/dtmax)+1
dt0 = np.float64((tn-t0)/ntmax)
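# Quick sanity print (illustrative) of the CFL-limited time step actually used:
print("dtmax = {:.4f} ms, ntmax = {}, dt0 = {:.4f} ms".format(dtmax, ntmax, dt0))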
# NBVAL_IGNORE_OUTPUT
time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)
nt = time_range.num - 1
(hxs,hzs) = grid.spacing_map
(x, z) = grid.dimensions
t = grid.stepping_dim
dt = grid.stepping_dim.spacing
f0 = 0.01
nsource = 1
xposf = 0.5*compx
zposf = hz
src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)
src.coordinates.data[:, 0] = xposf
src.coordinates.data[:, 1] = zposf
# NBVAL_IGNORE_OUTPUT
src.show()
nrec = nptx
nxpos = np.linspace(x0,x1,nrec)
nzpos = hz
rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)
rec.coordinates.data[:, 0] = nxpos
rec.coordinates.data[:, 1] = nzpos
u = TimeFunction(name="u",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)
vel0 = Function(name="vel0",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)
vel0.data[:,:] = v0[:,:]
src_term = src.inject(field=u.forward,expr=src*dt**2*vel0**2)
rec_term = rec.interpolate(expr=u)
pde = Eq(u.dt2 - u.laplace*vel0**2)
stencil = Eq(u.forward, solve(pde,u.forward),subdomain = grid.subdomains['d0'])
bc = [Eq(u[t+1,0,z],0.),Eq(u[t+1,nptx-1,z],0.),Eq(u[t+1,x,nptz-1],0.),Eq(u[t+1,x,0],u[t+1,x,1])]
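# The first three conditions impose u = 0 (homogeneous Dirichlet) on the left,
# right and bottom edges; the last one mirrors u across the top row, i.e. a
# zero-gradient (free-surface-like) condition at z = 0.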
# NBVAL_IGNORE_OUTPUT
op = Operator([stencil] + src_term + bc + rec_term,subs=grid.spacing_map)
u.data[:] = 0.
# NBVAL_IGNORE_OUTPUT
op(time=nt,dt=dt0)
def graph2d(U):
plot.figure()
plot.figure(figsize=(16,8))
fscale = 1/10**(3)
scale = np.amax(U)/10.
extent = [fscale*x0,fscale*x1,fscale*z1,fscale*z0]
fig = plot.imshow(np.transpose(U),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.axis('equal')
plot.title('Map - Acoustic Problem with Devito')
plot.grid()
ax = plot.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plot.colorbar(fig, cax=cax, format='%.2e')
cbar.set_label('Displacement [km]')
plot.draw()
plot.show()
# NBVAL_IGNORE_OUTPUT
graph2d(u.data[0,:,:])
def graph2drec(rec):
plot.figure()
plot.figure(figsize=(16,8))
fscaled = 1/10**(3)
fscalet = 1/10**(3)
scale = np.amax(rec)/10.
extent = [fscaled*x0,fscaled*x1, fscalet*tn, fscalet*t0]
fig = plot.imshow(rec, vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))
plot.axis('equal')
plot.title('Receivers Signal Profile - Devito')
ax = plot.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plot.colorbar(fig, cax=cax, format='%.2e')
plot.show()
# NBVAL_IGNORE_OUTPUT
graph2drec(rec.data)
assert np.isclose(np.linalg.norm(rec.data), 990, rtol=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From Devito's library of examples, we import some specific functions, such as
Step2: Previously we used the expression configuration ['log-level']='PERF', whose objective is that during execution we can verify the execution parameters of the operators used. We define the spatial parameters to be used in the simulations. In Devito, the mesh data must be defined taking into account meter (m) as the unit of measurement. Thus, we have the following variables
Step3: Once the spatial parameters are defined, we start the construction of the spatial grid using the Devito DSL. The required parameters are
Step4: Once the mesh parameters are built, we can define the structures that represent $\Omega$ subdomains, that is, particular regions of $\Omega$, named subdomains. In the present notebook, there is no need to split the domain in particular subregions, so we define a single subdomain that correponds to the full domain $\Omega$. This subdomain is built with the following command
Step5: After defining the spatial parameters and subdomains, we generate the spatial grid
Step6: The velocity field is set bellow, representing two different layers
Step7: Below we include a routine to plot the velocity field.
Step8: Below we include the plot of velocity field.
Step9: To build the temporal grid we use the command TimeAxis. Before using TimeAxis, we need to set the following parameters
Step10: To generate the temporal grid we use the variables given above and allocate them to some local variables of the TimeAxis function
Step11: Once we have defined the spatial and temporal structures, we need to know the symbolic values that are associated with these grids
Step12: To generate the Ricker source, we set the parameters
Step13: Ricker's source is generated by the class RickerSource whose local variables are
Step14: In order to set the receivers we need to define
Step15: In our case, we choose the number of grid points in the $x$ direction for the number of receivers, which are positioned along the grid line in the $x$ direction at the height $\bar{z}$ = 10m. In this way our variables are chosen as
Step16: Receivers are generated by the command Receiver whose local variables are
Step17: The next step is to create a variable in which we want to allocate the displacement field, which varies in space and time. It will be a field of type TimeFunction. The parameters needed to create a TimeFunction are
Step18: We also need to create a field for the wave propagation velocity, a field which will not change over time. Therefore it is defined as a Function type field. The parameters needed to create a Function are
Step19: Once we have created the Ricker source, we can then create the external force term in the acoustics equation. This insertion of the external force term in the acoustic equation takes into account the discretization of the differential equation. Therefore, it is multiplied by $\Delta^2 t$ and $c^2(x,z)$.
Step20: To define the Receivers we use the structure called rec.interpolate. We need to define the expression expr to
Step21: We now create the acoustic equation without the external force term. As defined previusly, u represents the displacement field and vel0 the field that carries the velocity information.
Step22: For Devito to solve a predefined pde we use the expression solve(pde,field) where the parameters are the equation we want to solve (pde) and the field being updated (field1). In our case, we want to solve the pde in the u.forward field, so we have
Step23: We also need to create the boundary conditions.
Step24: We then define the operator (op) that will combine the acoustic equation with the source term, boundary conditions and receivers.
Step25: Before starting we reset the field u in all its values.
Step26: We assign to op the number of time steps it must execute, using the local variable time, and the size of the time step, using the local variable dt, so that
Step27: To view the result of the displacement field at the end time, let's create a plot routine as follows
Step28: To access the solution in the u field, we will access the 0 position in u.data, that is,
Step29: Realize that the solution has a large amount of noise, which is generated by the reflections at the boundaries. The main objective of this series of notebooks is to present several numerical schemes designed to reduce the wave reflections on the computational boundaries of the domain during simulation.
Step30: To access the result of the displacement in the receivers, we access the term rec.data and pass this term as an argument to the function graph2drec.
Step31: From this plot we can clearly see the reflections of the waves at the lateral boundaries.
|
12,533
|
<ASSISTANT_TASK:>
Python Code:
import word2vec
word2vec.word2phrase('./text8', './text8-phrases', verbose=True)
word2vec.word2vec('./text8-phrases', './text8.bin', size=100, verbose=True)
word2vec.word2clusters('./text8', './text8-clusters.txt', 100, verbose=True)
import word2vec
model = word2vec.load('./text8.bin')
model.vocab
model.vectors.shape
model.vectors
model['dog'].shape
model['dog'][:10]
indexes, metrics = model.cosine('socks')
indexes, metrics
model.vocab[indexes]
model.generate_response(indexes, metrics)
model.generate_response(indexes, metrics).tolist()
indexes, metrics = model.cosine('los_angeles')
model.generate_response(indexes, metrics).tolist()
indexes, metrics = model.analogy(pos=['king', 'woman'], neg=['man'], n=10)
indexes, metrics
model.generate_response(indexes, metrics).tolist()
clusters = word2vec.load_clusters('./text8-clusters.txt')
clusters['dog']
clusters.get_words_on_cluster(90).shape
clusters.get_words_on_cluster(90)[:10]
model.clusters = clusters
indexes, metrics = model.analogy(pos=['paris', 'germany'], neg=['france'], n=10)
model.generate_response(indexes, metrics).tolist()
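# Illustrative extra (an assumption, not from the original demo): cosine
# similarity between two raw word vectors, computed directly.
import numpy as np
v1, v2 = model['paris'], model['france']
print(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))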
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run word2phrase to group up similar words "Los Angeles" to "Los_Angeles"
Step2: This will create a text8-phrases that we can use as a better input for word2vec.
Step3: That generated a text8.bin file containing the word vectors in a binary format.
Step4: That created a text8-clusters.txt with the cluster for every word in the vocabulary
Step5: Import the word2vec binary file created above
Step6: We can take a look at the vocabulary as a numpy array
Step7: Or take a look at the whole matrix
Step8: We can retrieve the vector of individual words
Step9: We can do simple queries to retrieve words similar to "socks" based on cosine similarity
Step10: This returned a tuple with 2 items
Step11: There is a helper function to create a combined response
Step12: It is easy to make that numpy array a pure python response
Step13: Phrases
Step14: Analogies
Step15: Clusters
Step16: We can get the cluster number for individual words
Step17: We can get all the words grouped in a specific cluster
Step18: We can add the clusters to the word2vec model and generate a response that includes the clusters
|
12,534
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import bt
names = ['foo','bar','rf']
dates = pd.date_range(start='2015-01-01',end='2018-12-31', freq=pd.tseries.offsets.BDay())
n = len(dates)
rdf = pd.DataFrame(
np.zeros((n, len(names))),
index = dates,
columns = names
)
np.random.seed(1)
rdf['foo'] = np.random.normal(loc = 0.1/252,scale=0.2/np.sqrt(252),size=n)
rdf['bar'] = np.random.normal(loc = 0.04/252,scale=0.05/np.sqrt(252),size=n)
rdf['rf'] = 0.
pdf = 100*np.cumprod(1+rdf)
pdf.plot()
selectTheseAlgo = bt.algos.SelectThese(['foo','bar'])
# algo to set the weights to 1/vol contributions from each asset
# with data over the last 3 months excluding yesterday
weighInvVolAlgo = bt.algos.WeighInvVol(
lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=1)
)
# algo to rebalance the current weights to weights set in target.temp
rebalAlgo = bt.algos.Rebalance()
# a strategy that rebalances daily to 1/vol weights
strat = bt.Strategy(
'Target',
[
selectTheseAlgo,
weighInvVolAlgo,
rebalAlgo
]
)
# set integer_positions=False when positions are not required to be integers(round numbers)
backtest = bt.Backtest(
strat,
pdf,
integer_positions=False
)
res_target = bt.run(backtest)
res_target.get_security_weights().plot()
# algo to fire whenever predicted tracking error is greater than 1%
wdf = res_target.get_security_weights()
PTE_rebalance_Algo = bt.algos.PTE_Rebalance(
0.01,
wdf,
lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
selectTheseAlgo = bt.algos.SelectThese(['foo','bar'])
# algo to set the weights to the target weights
# saved from the Target strategy above
weighTargetAlgo = bt.algos.WeighTarget(
wdf
)
rebalAlgo = bt.algos.Rebalance()
# a strategy that rebalances to the target weights whenever the PTE algo fires
strat = bt.Strategy(
'PTE',
[
PTE_rebalance_Algo,
selectTheseAlgo,
weighTargetAlgo,
rebalAlgo
]
)
# set integer_positions=False when positions are not required to be integers (round numbers)
backtest = bt.Backtest(
strat,
pdf,
integer_positions=False
)
res_PTE = bt.run(backtest)
fig, ax = plt.subplots(nrows=1,ncols=1)
res_target.get_security_weights().plot(ax=ax)
realized_weights_df = res_PTE.get_security_weights()
realized_weights_df['PTE foo'] = realized_weights_df['foo']
realized_weights_df['PTE bar'] = realized_weights_df['bar']
realized_weights_df = realized_weights_df.loc[:,['PTE foo', 'PTE bar']]
realized_weights_df.plot(ax=ax)
ax.set_title('Target Weights vs PTE Weights')
ax.plot()
trans_df = pd.DataFrame(
index=res_target.prices.index,
columns=['Target','PTE']
)
transactions = res_target.get_transactions()
transactions = (transactions['quantity'] * transactions['price']).reset_index()
bar_mask = transactions.loc[:,'Security'] == 'bar'
foo_mask = transactions.loc[:,'Security'] == 'foo'
trans_df.loc[trans_df.index[4:],'Target'] = np.abs(transactions[bar_mask].iloc[:,2].values) + np.abs(transactions[foo_mask].iloc[:,2].values)
transactions = res_PTE.get_transactions()
transactions = (transactions['quantity'] * transactions['price']).reset_index()
bar_mask = transactions.loc[:,'Security'] == 'bar'
foo_mask = transactions.loc[:,'Security'] == 'foo'
trans_df.loc[transactions[bar_mask].iloc[:,0],'PTE'] = np.abs(transactions[bar_mask].iloc[:,2].values)
trans_df.loc[transactions[foo_mask].iloc[:,0],'PTE'] += np.abs(transactions[foo_mask].iloc[:,2].values)
trans_df = trans_df.fillna(0)
fig, ax = plt.subplots(nrows=1,ncols=1)
trans_df.cumsum().plot(ax=ax)
ax.set_title('Cumulative sum of notional traded')
ax.plot()
weights_target = res_target.get_security_weights()
rolling_cov_target = pdf.loc[:,weights_target.columns].pct_change().rolling(window=3*20).cov()*252
weights_PTE = res_PTE.get_security_weights().loc[:,weights_target.columns]
rolling_cov_PTE = pdf.loc[:,weights_target.columns].pct_change().rolling(window=3*20).cov()*252
trc_target = pd.DataFrame(
np.nan,
index = weights_target.index,
columns = weights_target.columns
)
trc_PTE = pd.DataFrame(
np.nan,
index = weights_PTE.index,
columns = [x + " PTE" for x in weights_PTE.columns]
)
for dt in pdf.index:
trc_target.loc[dt,:] = weights_target.loc[dt,:].values*(rolling_cov_target.loc[dt,:].values@weights_target.loc[dt,:].values)/np.sqrt(weights_target.loc[dt,:].values@rolling_cov_target.loc[dt,:].values@weights_target.loc[dt,:].values)
trc_PTE.loc[dt,:] = weights_PTE.loc[dt,:].values*(rolling_cov_PTE.loc[dt,:].values@weights_PTE.loc[dt,:].values)/np.sqrt(weights_PTE.loc[dt,:].values@rolling_cov_PTE.loc[dt,:].values@weights_PTE.loc[dt,:].values)
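# Equivalent vectorised form of the loop above, as a sketch: for weights w and
# covariance Sigma, contribution_i = w_i * (Sigma w)_i / sqrt(w' Sigma w),
# so the contributions sum to the portfolio volatility.
def risk_contributions(w, sigma):
    sigma_w = sigma @ w
    return w * sigma_w / np.sqrt(w @ sigma_w)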
fig, ax = plt.subplots(nrows=1,ncols=1)
trc_target.plot(ax=ax)
trc_PTE.plot(ax=ax)
ax.set_title('Total Risk Contribution')
ax.plot()
fig, ax = plt.subplots(nrows=1,ncols=1)
trc_target.sum(axis=1).plot(ax=ax,label='Target')
trc_PTE.sum(axis=1).plot(ax=ax,label='PTE')
ax.legend()
ax.set_title('Total Risk')
ax.plot()
transactions = res_PTE.get_transactions()
transactions = (transactions['quantity'] * transactions['price']).reset_index()
bar_mask = transactions.loc[:,'Security'] == 'bar'
dates_of_PTE_transactions = transactions[bar_mask].iloc[:,0]
dates_of_PTE_transactions
fig, ax = plt.subplots(nrows=1,ncols=1)
np.sum(np.abs(trc_target.values - trc_PTE.values))
#.abs().sum(axis=1).plot()
ax.set_title('Total Risk')
ax.plot(
trc_target.index,
np.sum(np.abs(trc_target.values - trc_PTE.values),axis=1),
label='PTE'
)
for i,dt in enumerate(dates_of_PTE_transactions):
if i == 0:
ax.axvline(x=dt,color='red',label='PTE Transaction')
else:
ax.axvline(x=dt,color='red')
ax.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build and run Target Strategy
Step2: Now use the PTE rebalance algo to trigger a rebalance whenever predicted tracking error is greater than 1%.
Step3: If we plot the total risk contribution of each asset class and divide by the total volatility, then we can see that both strategies contribute roughly similar amounts of volatility from both of the securities.
Step4: Looking at the Target and PTE strategies' total risk, they are very similar.
|
12,535
|
<ASSISTANT_TASK:>
Python Code:
x=5
print(x)
import numpy as np
def sin_signal(t, omega=0.1, t0=0.):
"""A sinusoidal signal."""
signal = np.sin(omega * (t-t0))
return signal
# let's try
t = np.linspace(0., 100., 10)
# The output of the last entry in a cell gets printed
sin_signal(t)
# Sometimes, the output is too much..
sin_signal(np.linspace(0., 100., 1000))
sin_signal(np.linspace(0., 100., 1000));
np.histogram
np.histogram?
# or ?np.histogram
np.histogram??
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
mpl.rcParams['figure.figsize'] = (8,5)
mpl.rcParams['font.size'] = 18
%%writefile plot.py
t = np.linspace(0., 100., 1000)
plt.plot(t, sin_signal(t))
plt.plot(t, sin_signal(t, omega=0.2))
# plt.plot(t, sin_signal(t, omega=0.2, t0=-15))
plt.xlabel("Time (t)")
plt.ylabel("Signal")
plt.savefig("figure.png")
%pycat plot.py
## Jupyter magics
# This will list all magic commands
%lsmagic
def sum_py(x):
"""A not very smart sum function."""
N = len(x)
sum = 0
for i in range(N):
sum += x[i]
return sum
N = 1000
%%timeit
sum_py(np.arange(N, dtype='float64'))
N = 100000
%%timeit
sum_py(np.arange(N, dtype='float64'))
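# For comparison (illustrative): numpy's vectorised sum is orders of magnitude
# faster than the pure-Python loop, since it avoids per-element interpreter work.
%timeit np.sum(np.arange(N, dtype='float64'))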
whos
%who function
! ls .
%%bash
for i in `seq 1 10`;
do
echo $i
done
from ipywidgets import interactive
def f(m, b):
plt.figure(2)
x = np.linspace(-10, 10, num=1000)
plt.plot(x, m * x + b)
plt.ylim(-5, 5)
plt.show()
interactive_plot = interactive(f, m=(-2.0, 2.0), b=(-3, 3, 0.5))
output = interactive_plot.children[-1]
output.layout.height = '350px'
interactive_plot
def sin_signal(omega=0.1, t0=0.):
"""A sinusoidal signal."""
N = 10000
t = np.linspace(0., 100., N)
signal = np.sin(omega * (t-t0))
plt.plot(t, signal)
plt.show()
return
interactive_plot2 = interactive(sin_signal, omega=(0, 1, 0.1), t0=(-30, 30, 0.5))
output = interactive_plot2.children[-1]
output.layout.height = '350px'
interactive_plot2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hi this is normal text
Step3: We can define functions and call them later
Step4: The output can be suppressed by adding a ";" at the end of the command, e.g.
Step5: Text formatting is very handy. It is based on Markdown directives, see here a useful cheatsheet.
Step6: Better with "?" or "??" at the end
Step7: Embedding plots
Step8: Let's plot!
Step9: Jupyter magics
Step11: Example
Step12: You can time any function by simply using the %%timeit magic
Step13: Warning
Step14: Executing code other than Python
Step15: or more complicated bash scripts such as
Step17: Widgets
|
12,536
|
<ASSISTANT_TASK:>
Python Code:
import graphlab as gl
import re
import matplotlib.pyplot as plt
gl.canvas.set_target('ipynb')
%matplotlib inline
amazon = gl.SFrame.read_csv('Amazon.csv', verbose=False)
google = gl.SFrame.read_csv('GoogleProducts.csv', verbose=False)
truth = gl.SFrame.read_csv('Amzon_GoogleProducts_perfectMapping.csv', verbose=False)
print 'Amazon length: ', amazon.num_rows()
amazon.head(2)
print 'Google length: ', google.num_rows()
google.head(2)
print 'Truth length: ', truth.num_rows()
truth.head(2)
def transform(truth, amazon, google):
'''Transform the data into a more manageable format'''
# For the sake of this webinar we will look only at the names of the products
amazon = amazon[['id', 'title']]
google = google[['id', 'name']]
# Add a unique numeric label
amazon = amazon.add_row_number(column_name='label')
google = google.add_row_number(column_name='label')
# Change labels in truth based on the new numerical labels
truth = truth.join(amazon, on={'idAmazon' : 'id'})
truth = truth.join(google, on={'idGoogleBase' : 'id'})
# Rename some columns
amazon = amazon.rename({'title' : 'name'})
truth = truth.rename({
'label' : 'amazon label',
'title' : 'amazon name',
'label.1' : 'google label',
'name' : 'google name'
})
# Remove some others
truth.remove_columns(['idGoogleBase', 'idAmazon'])
amazon = amazon.remove_column('id')
google = google.remove_column('id')
return truth, amazon, google
truth, amazon, google = transform(truth, amazon, google)
amazon.head(3)
google.head(3)
truth.head(3)
def accuracy_at(results, truth):
'''Compute the accuracy at k of a record linkage model, given a true mapping'''
joined = truth.join(results, on={'google label' : 'query_label'})
num_correct_labels = (joined['amazon label'] == joined['reference_label']).sum()
return num_correct_labels / float(truth.num_rows())
def get_matches(results, amazon, google):
'''Return the results of a record linkage model in a readable format'''
joined = results.join(amazon, on={'reference_label' : 'label'}).join(google, on={'query_label' : 'label'})
joined = joined[['name', 'name.1', 'distance', 'rank']]
joined = joined.rename({'name' : 'amazon name', 'name.1' : 'google name'})
return joined
base_linker = gl.record_linker.create(amazon, features=['name'])
results = base_linker.link(google, k=3)
results
print 'Accuracy@3', accuracy_at(results, truth)
get_matches(results, amazon, google)
# Example of features that the record linker create
amazon['3 char'] = gl.text_analytics.count_ngrams(amazon['name'], n=3, method='character')
amazon.head(3)
# Remove the feature for the sake of cleanliness
amazon = amazon.remove_column('3 char')
from collections import Counter
# Extract numbers from the name
amazon['numbers'] = amazon['name'].apply(lambda name: dict(Counter(re.findall('\d+\.*\d*', name))))
google['numbers'] = google['name'].apply(lambda name: dict(Counter(re.findall('\d+\.*\d*', name))))
amazon.head(5)
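# Quick illustration (hypothetical product name) of what the extraction above
# produces: every number, including decimals, with its count.
print dict(Counter(re.findall('\d+\.*\d*', 'canon powershot sd1100 is 8.0mp')))
# {'1100': 1, '8.0': 1}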
# Create a record linker using the extracted numeric features
num_linker = gl.record_linker.create(amazon, features=['name', 'numbers'])
results = num_linker.link(google, k=3, verbose=False)
print 'Accuracy@3', accuracy_at(results, truth)
get_matches(results, amazon, google)
# Calculate accuracy at k for k between 1 and 10 for both models
k_range = range(1, 11)
base_accuracy = [accuracy_at(base_linker.link(google, k, verbose=False), truth) for k in k_range]
num_accuracy = [accuracy_at(num_linker.link(google, k, verbose=False), truth) for k in k_range]
# Plot the results
plt.style.use('ggplot')
plt.title('Accuracy@k')
plt.ylabel('Accuracy')
plt.xlabel('k')
plt.plot(k_range, base_accuracy, marker='o', color='b', label='Base Linker')
plt.plot(k_range, num_accuracy, marker='o', color='g', label='Number Linker')
plt.legend(loc=4)
None
results = num_linker.link(google, k=10, verbose=False)
print 'Accuracy:', accuracy_at(results, truth)
print 'Possible number of results to go through:', len(results)
results['distance'].show()
results = num_linker.link(google, k=None, radius=1.61, verbose=False)
print 'Accuracy:', accuracy_at(results, truth)
print 'Possible number of results to go through:', len(results)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id="eval"></a> Evaluation functions
Step2: <a id="model"></a> Record Linker model
Step3: <a id="feature"></a> Feature Engineering
Step4: <b>In product matching, numbers can be highly helpful as they can represent model identifiers, versions, etc.</b>
Step5: <a id="k"></a> Exploring k
Step6: <b>
|
12,537
|
<ASSISTANT_TASK:>
Python Code:
from pymldb import Connection
mldb = Connection("http://localhost/")
print mldb.put('/v1/procedures/import_rcp', {
"type": "import.text",
"params": {
"headers": ["user_id", "recipe_id"],
"dataFileUrl": "file://mldb/mldb_test_data/favorites.csv.gz",
"outputDataset": "rcp_raw",
"runOnCreation": True
}
})
print mldb.post('/v1/procedures', {
"id": "rcp_import",
"type": "transform",
"params": {
"inputData": "select pivot(recipe_id, 1) as * named user_id from rcp_raw group by user_id",
"outputDataset": "recipes",
"runOnCreation": True
}
})
print mldb.post('/v1/procedures', {
"id": "rcp_svd",
"type" : "svd.train",
"params" : {
"trainingData": "select * from recipes",
"columnOutputDataset" : "rcp_svd_embedding_raw",
"runOnCreation": True
}
})
num_centroids = 16
print mldb.post('/v1/procedures', {
"id" : "rcp_kmeans",
"type" : "kmeans.train",
"params" : {
"trainingData" : "select * from rcp_svd_embedding_raw",
"outputDataset" : "rcp_kmeans_clusters",
"centroidsDataset" : "rcp_kmeans_centroids",
"numClusters" : num_centroids,
"runOnCreation": True
}
})
print mldb.put('/v1/procedures/import_rcp_names_raw', {
'type': 'import.text',
'params': {
'dataFileUrl': 'file://mldb/mldb_test_data/recipes.csv.gz',
'outputDataset': "rcp_names_raw",
'delimiter':'',
'quoteChar':'',
'runOnCreation': True
}
})
print mldb.put('/v1/procedures/rcp_names_import', {
'type': 'transform',
'params': {
'inputData': '''
select jseval(
'return s.substr(s.indexOf(",") + 1)
.replace(/"/g, "")
.replace(/®/g, "");',
's', lineText) as name
named implicit_cast(rowName()) - 1
from rcp_names_raw
''',
'outputDataset': 'rcp_names',
'runOnCreation': True
}
})
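# For readability, a pure-Python equivalent of the jseval cleanup above
# (hypothetical raw line): drop everything up to the first comma, then strip
# double quotes and (R) symbols.
line = u'42,"Chocolate® Cake"'
print line.split(',', 1)[1].replace(u'"', u'').replace(u'®', u'')
# Chocolate Cake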
print mldb.put('/v1/procedures/rcp_clean_svd', {
'type': 'transform',
'params': {
'inputData': """
select rcp_svd_embedding_raw.* as *
named rcp_names.rowName()+'-'+rcp_names.name
from rcp_svd_embedding_raw
join rcp_names on (rcp_names.rowName() = rcp_svd_embedding_raw.rowPathElement(0))
""",
'outputDataset': {'id': 'rcp_svd_embedding',
'type': 'embedding',
'params': {'metric': 'cosine'}},
'runOnCreation': True
}
})
mldb.put("/v1/functions/nearestRecipe", {
"type":"embedding.neighbors",
"params": { "dataset": "rcp_svd_embedding", "defaultNumNeighbors": 3 }
})
mldb.query("""
select nearestRecipe({coords: {*}})[neighbors] as * from rcp_kmeans_centroids
""").applymap(lambda x: x.split('-')[1])
print mldb.put('/v1/procedures/sum_words_per_cluster', {
'type': 'transform',
'params': {
'inputData': """
select sum({tokens.* as *}) as *
named c.cluster
from (
SELECT lower(n.name),
tokenize('recipe ' + lower(n.name), {splitChars:' -.;&!''()",', minTokenLength: 4}) as tokens,
c.cluster
FROM rcp_names as n
JOIN rcp_kmeans_clusters as c ON (n.rowName() = c.rowPathElement(0))
order by n.rowName()
)
group by c.cluster
""",
'outputDataset': 'rcp_cluster_word_counts',
'runOnCreation': True
}
})
mldb.query("select * from rcp_cluster_word_counts order by implicit_cast(rowName())")
print mldb.put('/v1/procedures/train_tfidf', {
'type': 'tfidf.train',
'params': {
'trainingData': "select * from rcp_cluster_word_counts",
'modelFileUrl': 'file:///mldb_data/models/rcp_tfidf.idf',
'runOnCreation': True
}
})
print mldb.put('/v1/functions/rcp_tfidf', {
'type': 'tfidf',
'params': {
'modelFileUrl': 'file:///mldb_data/models/rcp_tfidf.idf',
'tfType': 'log', 'idfType': 'inverse'
}
})
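# For intuition only: a rough pure-Python version of the scoring configured
# above, under one common reading of tfType='log' and idfType='inverse'
# (MLDB's exact formulas may differ in detail).
import math
def tfidf_score(tf, df, n_docs):
    return math.log(1 + tf) * math.log(float(n_docs) / df)
print tfidf_score(tf=5, df=2, n_docs=16)  # a word seen 5x, in 2 of 16 clusters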
print mldb.put('/v1/procedures/apply_tfidf', {
'type': 'transform',
'params': {
'inputData': "select rcp_tfidf({input: {*}})[output] as * from rcp_cluster_word_counts",
'outputDataset': 'rcp_cluster_word_scores',
'runOnCreation': True
}
})
mldb.query("select * from rcp_cluster_word_scores order by implicit_cast(rowName())")
import json
from ipywidgets import interact
from IPython.display import IFrame, display
html = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.6/d3.min.js"></script>
<script src="https://static.mldb.ai/d3.layout.cloud.js"></script>
<script src="https://static.mldb.ai/wordcloud.js"></script>
<body> <script>drawCloud(%s)</script> </body>
"""
@interact
def cluster_word_cloud(cluster=[0, num_centroids-1]):
num_words = 20
cluster_words = mldb.get(
'/v1/query',
q="""
SELECT rowName() as text
FROM transpose(rcp_cluster_word_scores)
ORDER BY "{0}" DESC
LIMIT {1}
""".format(cluster, num_words),
format='aos',
rowNames=0
).json()
for i,x in enumerate(cluster_words):
x['size'] = num_words - i
display( IFrame("data:text/html," + (html % json.dumps(cluster_words)).replace('"',"'"), 850, 350) )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The sequence of procedures below is based on the one explained in the Mapping Reddit demo notebook.
Step2: We then train an SVD decomposition and do K-Means clustering
Step4: Now we import the actual recipe names, clean them up a bit, and get a version of our SVD embedding with the recipe names as column names.
Step6: With all that pre-processing done, let's look at the names of the 3 closest recipes to each cluster centroid to try to get a sense of what kind of clusters we got.
Step9: We can see a bit of pattern just from the names of the recipes nearest to the centroids, but we can probably do better! Let's try to extract the most characteristic words used in the recipe names for each cluster.
Step10: We can use this to create a TF-IDF score for each word in the cluster. Basically this score will give us an idea of the relative importance of each word in a given cluster.
Step13: If we transpose that dataset, we will be able to get the highest scored words for each cluster, and we can display them nicely in a word cloud.
|
12,538
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-3', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
12,539
|
<ASSISTANT_TASK:>
Python Code:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", reshape=False)
X_train, y_train = mnist.train.images, mnist.train.labels
X_validation, y_validation = mnist.validation.images, mnist.validation.labels
X_test, y_test = mnist.test.images, mnist.test.labels
assert(len(X_train) == len(y_train))
assert(len(X_validation) == len(y_validation))
assert(len(X_test) == len(y_test))
print()
print("Image Shape: {}".format(X_train[0].shape))
print()
print("Training Set: {} samples".format(len(X_train)))
print("Validation Set: {} samples".format(len(X_validation)))
print("Test Set: {} samples".format(len(X_test)))
import numpy as np
# Pad images with 0s
X_train = np.pad(X_train, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_validation = np.pad(X_validation, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_test = np.pad(X_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')
print("Updated Image Shape: {}".format(X_train[0].shape))
import random
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
index = random.randint(0, len(X_train) - 1)
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image, cmap="gray")
print(y_train[index])
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
import tensorflow as tf
EPOCHS = 10
BATCH_SIZE = 128
from tensorflow.contrib.layers import flatten
def LeNet(x):
# Hyperparameters
mu = 0
sigma = 0.1
dropout = 0.75  # defined for completeness; not used in this architecture
# TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
weights = {
'wc1': tf.Variable(tf.random_normal([5,5,1,6], mean=mu, stddev=sigma)),
'wc2': tf.Variable(tf.random_normal([5,5,6,16], mean=mu, stddev=sigma)),
'wd1': tf.Variable(tf.random_normal([400, 120], mean=mu, stddev=sigma)),
'wd2': tf.Variable(tf.random_normal([120, 84], mean=mu, stddev=sigma)),
'wd3': tf.Variable(tf.random_normal([84, 10], mean=mu, stddev=sigma))}
biases = {
'bc1': tf.Variable(tf.zeros(6)),
'bc2': tf.Variable(tf.zeros(16)),
'bd1': tf.Variable(tf.zeros(120)),
'bd2': tf.Variable(tf.zeros(84)),
'bd3': tf.Variable(tf.zeros(10))}
conv1 = tf.nn.conv2d(x, weights['wc1'], strides=[1, 1, 1, 1], padding='VALID')
conv1 = tf.nn.bias_add(conv1, biases['bc1'])
# TODO: Activation.
conv1 = tf.nn.relu(conv1)
# TODO: Pooling. Input = 28x28x6. Output = 14x14x6.
ksize = [1,2,2,1]
strides = [1,2,2,1]
padding = 'VALID'
conv1 = tf.nn.max_pool(conv1, ksize, strides, padding)
# TODO: Layer 2: Convolutional. Output = 10x10x16.
conv2 = tf.nn.conv2d(conv1, weights['wc2'], strides=[1, 1, 1, 1], padding='VALID')
conv2 = tf.nn.bias_add(conv2, biases['bc2'])
# TODO: Activation.
conv2 = tf.nn.relu(conv2)
# TODO: Pooling. Input = 10x10x16. Output = 5x5x16.
ksize = [1,2,2,1]
strides = [1,2,2,1]
padding = 'VALID'
conv2 = tf.nn.max_pool(conv2, ksize, strides, padding)
# TODO: Flatten. Input = 5x5x16. Output = 400.
fc0 = flatten(conv2)
# TODO: Layer 3: Fully Connected. Input = 400. Output = 120.
fc1 = tf.add(tf.matmul(fc0, weights['wd1']), biases['bd1'])
# TODO: Activation.
fc1 = tf.nn.relu(fc1)
# TODO: Layer 4: Fully Connected. Input = 120. Output = 84.
fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
# TODO: Activation.
fc2 = tf.nn.relu(fc2)
# TODO: Layer 5: Fully Connected. Input = 84. Output = 10.
logits = tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3'])
return logits
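# Quick sanity check (illustrative, not part of the original lab) for the
# spatial sizes quoted in the comments above: a VALID convolution shrinks a
# side from n to n - k + 1, and 2x2/stride-2 max-pooling halves it.
def valid_conv_size(n, k):
    return n - k + 1

assert valid_conv_size(32, 5) == 28   # Layer 1 conv: 32 -> 28
assert 28 // 2 == 14                  # pool: 28 -> 14
assert valid_conv_size(14, 5) == 10   # Layer 2 conv: 14 -> 10
assert 10 // 2 == 5                   # pool: 10 -> 5, flatten -> 5*5*16 = 400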
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 10)
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
validation_accuracy = evaluate(X_validation, y_validation)
print("EPOCH {} ...".format(i+1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
saver.save(sess, 'lenet')
print("Model saved")
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
test_accuracy = evaluate(X_test, y_test)
print("Test Accuracy = {:.3f}".format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The MNIST data that TensorFlow pre-loads comes as 28x28x1 images.
Step2: Visualize Data
Step3: Preprocess Data
Step4: Setup TensorFlow
Step5: TODO
Step6: Features and Labels
Step7: Training Pipeline
Step8: Model Evaluation
Step9: Train the Model
Step10: Evaluate the Model
|
12,540
|
<ASSISTANT_TASK:>
Python Code:
import logging
logging.basicConfig(level = logging.INFO)
import smurff
import numpy as np
import scipy.sparse as sp
def gen_matrix(shape, num_latent, density = 1.0 ):
"""Generate a matrix by multiplying two factors. Sparsify if asked."""
X = np.random.normal(size=(shape[0],num_latent))
W = np.random.normal(size=(shape[1],num_latent))
Y = np.dot(X, W.transpose()) + np.random.normal(size=shape)
if density < 1.0:
Y = sp.coo_matrix(Y)
_, Y = smurff.make_train_test(Y, density)
return Y, X ,W
Y, X, W = gen_matrix((20, 30), 2, 0.8)
Ytrain, Ytest = smurff.make_train_test(Y, 0.1)
##### train the model
# using X and W as sideinfo
session = smurff.MacauSession(
Ytrain = Ytrain,
Ytest = Ytest,
side_info = [X, W],
direct = True,
num_latent = 4,
burnin = 40,
nsamples = 100,
save_freq = 1,
save_name = "syn-out-matrix-pred.hdf5",
verbose = 0,)
predictions = session.run()
# predictor = smurff.PredictSession.fromRootFile("syn-out-matrix-pred/root.ini")
predictor = session.makePredictSession()
print(predictor)
# predict with a single feature-vector and for one column with PredictSession.predict
# return type: smurff.Prediction
p = predictor.predict((X[0],0))
print(p)
# predict with many feature-vectors in a np.ndarray and for all columns with PredictSession.predict
# return type: numpy.ndarray
p = predictor.predict((X,Ellipsis))
print(p)
# predict with many feature-vectors in a np.ndarray and for a single columns with PredictSession.predict
# return type: numpy.ndarray
p = predictor.predict((X[1:5],10))
print(p)
# predict with a single feature-vector and for one row with PredictSession.predict
p = predictor.predict((0, W[0]))
print(p)
# predict with many feature-vectors in a np.ndarray and for all rows with PredictSession.predict
# return type: numpy.ndarray
p = predictor.predict((None, W))
print(p)
# predict with many feature-vectors in a np.ndarray and for a single row with PredictSession.predict
# return type: numpy.ndarray
p = predictor.predict((10,W[1:5]))
print(p)
# predict with a single feature-vector for the rows, and for the columns
p = predictor.predict((X[0], W[0]))
print(p)
# predict with many feature-vectors in a np.ndarray PredictSession.predict
# return type: numpy.ndarray
p = predictor.predict((X, W))
print(p)
# predict with many feature-vectors in a np.ndarray and for a single row with PredictSession.predict
# return type: numpy.ndarray
p = predictor.predict((X[1:4],W[1:5]))
print(p)
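# Truly out-of-matrix (illustrative): predict for a feature vector that was
# never seen during training. This assumes predict() accepts any side-info
# vector of the right length, consistent with the calls above.
x_new = np.random.normal(size=X.shape[1])
p = predictor.predict((x_new, 0))
print(p)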
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Out-of-matrix prediction using synthetic data
Step2: Train the model
Step3: Make a PredictSession
Step4: Out-of-matrix prediction using side-info for the rows
Step5: Out-of-matrix prediction using side-info for the columns
Step6: Out-of-matrix prediction using side-info for rows and columns
|
12,541
|
<ASSISTANT_TASK:>
Python Code:
!ls -lh ../data/reads
!gunzip -c ../data/reads/mutant1_OIST-2015-03-28.fq.gz | head -8
!fastqc ../data/reads/mutant1_OIST-2015-03-28.fq.gz
from IPython.display import IFrame
IFrame('../data/reads/mutant1_OIST-2015-03-28_fastqc.html', width=1000, height=1000)
import gzip
from Bio import SeqIO
with gzip.open("../data/reads/mutant1_OIST-2015-03-28.fq.gz", 'rt') as infile: # open and decompress input file
for rec in SeqIO.parse(infile, "fastq"): # start looping over all records
print(rec) #print record contents
break # stop looping, we only want to see one record
print(dir(rec)) # print the methods associated with the record
rec.reverse_complement()
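# The per-base quality scores are also exposed by Biopython's standard API:
print(rec.letter_annotations["phred_quality"][:10])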
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We see that there are five files: four of them are mutants, and one is the original reference sample.
Step2: Each read in the FASTQ file format spans four lines: a unique read name, the sequence of bases, a separator line containing "+", and the quality scores. The quality scores correspond to the sequencer's confidence in making each base call.
Step3: Key statistics
Step4: You can see the methods associated with each object, such as rec, using the dir command.
Step5: For example, we can reverse complement the sequence
|
12,542
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from scipy.io import loadmat
from shogun import features, MulticlassLabels, Math
# load the dataset
dataset = loadmat(os.path.join(SHOGUN_DATA_DIR, 'multiclass/usps.mat'))
Xall = dataset['data']
# the usps dataset has the digits labeled from 1 to 10
# we'll subtract 1 to make them in the 0-9 range instead
Yall = np.array(dataset['label'].squeeze(), dtype=np.double)-1
# 4000 examples for training
Xtrain = features(Xall[:,0:4000])
Ytrain = MulticlassLabels(Yall[0:4000])
# the rest for testing
Xtest = features(Xall[:,4000:-1])
Ytest = MulticlassLabels(Yall[4000:-1])
# initialize the random number generator with a fixed seed, for repeatability
Math.init_random(10)
from shogun import NeuralLayers, DeepAutoencoder
layers = NeuralLayers()
layers = layers.input(256).rectified_linear(512).rectified_linear(128).rectified_linear(512).linear(256).done()
ae = DeepAutoencoder(layers)
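# A rough parameter count for the dense layers above (weights + biases per
# layer), just to get a feel for model size; illustrative only.
dims = [256, 512, 128, 512, 256]
n_params = sum(dims[i] * dims[i+1] + dims[i+1] for i in range(len(dims) - 1))
print(n_params)  # 394624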
from shogun import AENT_DROPOUT, NNOM_GRADIENT_DESCENT
ae.pt_noise_type.set_const(AENT_DROPOUT) # use dropout noise
ae.pt_noise_parameter.set_const(0.5) # each input has a 50% chance of being set to zero
ae.pt_optimization_method.set_const(NNOM_GRADIENT_DESCENT) # train using gradient descent
ae.pt_gd_learning_rate.set_const(0.01)
ae.pt_gd_mini_batch_size.set_const(128)
ae.pt_max_num_epochs.set_const(50)
ae.pt_epsilon.set_const(0.0) # disable automatic convergence testing
# uncomment this line to allow the training progress to be printed on the console
#from shogun import MSG_INFO; ae.io.set_loglevel(MSG_INFO)
# start pre-training. this might take some time
ae.pre_train(Xtrain)
ae.put('noise_type', AENT_DROPOUT) # same noise type we used for pre-training
ae.put('noise_parameter', 0.5)
ae.put('max_num_epochs', 50)
ae.put('optimization_method', NNOM_GRADIENT_DESCENT)
ae.put('gd_mini_batch_size', 128)
ae.put('gd_learning_rate', 0.0001)
ae.put('epsilon', 0.0)
# start fine-tuning. this might take some time
_ = ae.train(Xtrain)
# get a 50-example subset of the test set
subset = Xall[:,4000:4050].copy()
# corrupt the first 25 examples with multiplicative noise
subset[:,0:25] *= (random.random((256,25))>0.5)
# corrupt the other 25 examples with additive noise
subset[:,25:50] += random.random((256,25))
# obtain the reconstructions
reconstructed_subset = ae.reconstruct(features(subset))
# plot the corrupted data and the reconstructions
figure(figsize=(10,10))
for i in range(50):
ax1=subplot(10,10,i*2+1)
ax1.imshow(subset[:,i].reshape((16,16)), interpolation='nearest', cmap = cm.Greys_r)
ax1.set_xticks([])
ax1.set_yticks([])
ax2=subplot(10,10,i*2+2)
ax2.imshow(reconstructed_subset[:,i].reshape((16,16)), interpolation='nearest', cmap = cm.Greys_r)
ax2.set_xticks([])
ax2.set_yticks([])
# obtain the weights matrix of the first hidden layer
# the 512 is the number of biases in the layer (512 neurons)
# the transpose is because numpy stores matrices in row-major format, and Shogun stores
# them in column major format
w1 = ae.get_layer_parameters(1)[512:].reshape(256,512).T
# visualize the weights between the first 100 neurons in the hidden layer
# and the neurons in the input layer
figure(figsize=(10,10))
for i in range(100):
ax1=subplot(10,10,i+1)
ax1.imshow(w1[i,:].reshape((16,16)), interpolation='nearest', cmap = cm.Greys_r)
ax1.set_xticks([])
ax1.set_yticks([])
from shogun import NeuralSoftmaxLayer
nn = ae.convert_to_neural_network(NeuralSoftmaxLayer(10))
nn.put('max_num_epochs', 50)
nn.put('labels', Ytrain)
_ = nn.train(Xtrain)
from shogun import MulticlassAccuracy
predictions = nn.apply_multiclass(Xtest)
accuracy = MulticlassAccuracy().evaluate(predictions, Ytest) * 100
print("Classification accuracy on the test set =", accuracy, "%")
from shogun import DynamicObjectArray, NeuralInputLayer, NeuralConvolutionalLayer, CMAF_RECTIFIED_LINEAR
conv_layers = DynamicObjectArray()
# 16x16 single channel images
conv_layers.append_element(NeuralInputLayer(16,16,1))
# the first encoding layer: 5 feature maps, filters with radius 2 (5x5 filters)
# and max-pooling in a 2x2 region: its output will be 10 8x8 feature maps
conv_layers.append_element(NeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 5, 2, 2, 2, 2))
# the second encoding layer: 15 feature maps, filters with radius 2 (5x5 filters)
# and max-pooling in a 2x2 region: its output will be 20 4x4 feature maps
conv_layers.append_element(NeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 15, 2, 2, 2, 2))
# the first decoding layer: same structure as the first encoding layer
conv_layers.append_element(NeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 5, 2, 2))
# the second decoding layer: same structure as the input layer
conv_layers.append_element(NeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 1, 2, 2))
conv_ae = DeepAutoencoder(conv_layers)
conv_ae.pt_noise_type.set_const(AENT_DROPOUT) # use dropout noise
conv_ae.pt_noise_parameter.set_const(0.3) # each input has a 30% chance of being set to zero
conv_ae.pt_optimization_method.set_const(NNOM_GRADIENT_DESCENT) # train using gradient descent
conv_ae.pt_gd_learning_rate.set_const(0.002)
conv_ae.pt_gd_mini_batch_size.set_const(100)
conv_ae.pt_max_num_epochs[0] = 30 # max number of epochs for pre-training the first encoding layer
conv_ae.pt_max_num_epochs[1] = 10 # max number of epochs for pre-training the second encoding layer
conv_ae.pt_epsilon.set_const(0.0) # disable automatic convergence testing
# start pre-training. this might take some time
conv_ae.pre_train(Xtrain)
conv_nn = conv_ae.convert_to_neural_network(NeuralSoftmaxLayer(10))
# train the network
conv_nn.put('epsilon', 0.0)
conv_nn.put('max_num_epochs', 50)
conv_nn.put('labels', Ytrain)
# start training. this might take some time
_ = conv_nn.train(Xtrain)
predictions = conv_nn.apply_multiclass(Xtest)
accuracy = MulticlassAccuracy().evaluate(predictions, Ytest) * 100
print("Classification accuracy on the test set =", accuracy, "%")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating the autoencoder
Step2: Pre-training
Step3: Fine-tuning
Step4: Evaluation
Step5: The figure shows the corrupted examples and their reconstructions. The top half of the figure shows the ones corrupted with multiplicative noise, the bottom half shows the ones corrupted with additive noise. We can see that the autoencoders can provide decent reconstructions despite the heavy noise.
Step6: Now, we can use the autoencoder to initialize a supervised neural network. The network will have all the layer of the autoencoder up to (and including) the middle layer. We'll also add a softmax output layer. So, the network will look like
Step7: Next, we'll evaluate the accuracy on the test set
Step8: Convolutional Autoencoders
Step9: Now we'll pre-train the autoencoder
Step10: And then convert the autoencoder to a regular neural network for classification
Step11: And evaluate it on the test set
|
12,543
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df_red = pd.read_csv("data/winequality-red.csv",sep=";")
df_white = pd.read_csv("data/winequality-white.csv",sep=";")
# Add the type column
df_red['type'] = 1
df_white['type'] = 0
df = pd.concat([df_red,df_white], axis=0)
df.describe()
import matplotlib.pyplot as plt
df.boxplot()
plt.show()
import matplotlib.pyplot as plt
import matplotlib
df.hist()
plt.show()
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# Replace quality with a binary good_quality label
df['good_quality'] = [1 if q > 5 else 0 for q in df.quality]
df = df.drop('quality', axis=1)
# Shuffle the rows randomly
df_randomized = df.sample(frac=1, random_state=3850)
# Normalization
scaler = MinMaxScaler()
cols = df_randomized.columns.tolist()
cols.remove('good_quality')
df_randomized[cols] = scaler.fit_transform(df_randomized[cols])
# Split into train/validation/test
(Xtrain, X_general_tests) = train_test_split(df_randomized, test_size=0.5, random_state=8500)
(Xvalidation, Xtest) = train_test_split(X_general_tests, test_size=0.5, random_state=10500)
Ytrain = Xtrain['good_quality']
Ytest = Xtest['good_quality']
Yvalidation = Xvalidation['good_quality']
Xtrain = Xtrain.drop('good_quality', axis=1)
Xtest = Xtest.drop('good_quality', axis=1)
Xvalidation = Xvalidation.drop('good_quality', axis=1)
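# Sanity check (illustrative): confirm the binary target is reasonably
# balanced in each split before training.
print(Ytrain.value_counts(normalize=True))
print(Yvalidation.value_counts(normalize=True))
print(Ytest.value_counts(normalize=True))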
from sklearn.tree import DecisionTreeClassifier as Tree
from sklearn.metrics import accuracy_score
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
def train_tree(depth, Xtrain, Ytrain, Xtest, Ytest):
# Train the tree
model = Tree()
model.set_params(max_depth=depth, criterion='gini', splitter='best')
model.fit(Xtrain, Ytrain)
Ytrain_pred = model.predict(Xtrain)
Ytest_pred = model.predict(Xtest)
train_error = (1-accuracy_score(Ytrain, Ytrain_pred))
test_error = (1-accuracy_score(Ytest, Ytest_pred))
train_f1 = f1_score(Ytrain, Ytrain_pred)
test_f1 = f1_score(Ytest, Ytest_pred)
return (train_error, test_error, train_f1, test_f1)
def graph_tree_range(params):
train_errors = []
test_errors = []
f1_train_errors = []
f1_test_errors = []
for depth in params:
(train, test, f1train, f1test) = train_tree(depth, Xtrain, Ytrain, Xtest, Ytest)
train_errors.append(train)
test_errors.append(test)
f1_train_errors.append(f1train)
f1_test_errors.append(f1test)
plt.figure(figsize=(10, 8))
plt.plot(params, f1_train_errors, label="Train F1 Score")
plt.plot(params, f1_test_errors, label="Test F1 Score")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('Tree depth')
plt.ylabel('F1 score')
plt.show()
params = np.arange(1, 100, 1)
graph_tree_range(params)
params = np.arange(1, 20, 1)
graph_tree_range(params)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
def train_random_forest(estimators, depth, Xtrain, Ytrain, Xtest, Ytest):
    # Train the random forest
model = RandomForestClassifier(n_estimators=estimators, max_depth=depth,n_jobs=-1)
model.fit(Xtrain, Ytrain)
Ytrain_pred = model.predict(Xtrain)
Ytest_pred = model.predict(Xtest)
train_error = (1-accuracy_score(Ytrain, Ytrain_pred))
test_error = (1-accuracy_score(Ytest, Ytest_pred))
train_f1 = f1_score(Ytrain, Ytrain_pred)
    test_f1 = f1_score(Ytest, Ytest_pred)
    return (train_error, test_error, train_f1, test_f1)
def graph_random_forest_range(estimator_params, depth_params, z_to_show, upper_estimator, upper_depth):
points = []
for a in estimator_params:
for b in depth_params:
points.append((a, b))
train_errors = []
test_errors = []
f1_train_errors = []
f1_test_errors = []
for point in points:
(train, test, f1train, f1test) = train_random_forest(point[0], point[1], Xtrain, Ytrain, Xtest, Ytest)
train_errors.append(train)
test_errors.append(test)
f1_train_errors.append(f1train)
f1_test_errors.append(f1test)
x = []
y = []
z1 = []
z2 = []
for pt in points:
index = points.index(pt)
x.append(pt[0])
y.append(pt[1])
z1.append(f1_train_errors[index])
z2.append(f1_test_errors[index])
z = None
if z_to_show == 0:
z = z1
else:
z = z2
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1)
cmap = matplotlib.cm.get_cmap("jet")
pts = ax.scatter(x, y, s=60, c=z, cmap=cmap,
linewidth=0)
cbar = fig.colorbar(pts, ax=ax)
fig.axes[-1].set_ylabel("Z")
ax.grid()
ax.set_xlabel("Número de estimadores")
ax.set_ylabel("Profundidad máxima")
ax.set_xlim(0, upper_estimator + 1)
ax.set_ylim(0, upper_depth + 1)
if z_to_show == 0:
ax.set_title("F1 train score")
else:
ax.set_title("F1 test score")
for i, txt in enumerate(z):
ax.annotate("{0:.2f}".format(txt), (x[i],y[i]))
plt.show()
estimator_params = np.arange(1, 100, 10)
depth_params = np.arange(1, 20, 1)
graph_random_forest_range(estimator_params, depth_params, 0, 100, 20)
graph_random_forest_range(estimator_params, depth_params, 1, 100, 20)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
def train_ada_boost(estimators, depth, Xtrain, Ytrain, Xtest, Ytest):
    # Train the AdaBoost ensemble
model = AdaBoostClassifier(base_estimator=Tree(max_depth=depth), n_estimators=estimators)
model.fit(Xtrain, Ytrain)
Ytrain_pred = model.predict(Xtrain)
Ytest_pred = model.predict(Xtest)
train_error = (1-accuracy_score(Ytrain, Ytrain_pred))
test_error = (1-accuracy_score(Ytest, Ytest_pred))
train_f1 = f1_score(Ytrain, Ytrain_pred)
    test_f1 = f1_score(Ytest, Ytest_pred)
    return (train_error, test_error, train_f1, test_f1)
def graph_ada_boost_range(estimator_params, depth_params, z_to_show, upper_estimator, upper_depth):
points = []
for a in estimator_params:
for b in depth_params:
points.append((a, b))
train_errors = []
test_errors = []
f1_train_errors = []
f1_test_errors = []
for point in points:
(train, test, f1train, f1test) = train_ada_boost(point[0], point[1], Xtrain, Ytrain, Xtest, Ytest)
train_errors.append(train)
test_errors.append(test)
f1_train_errors.append(f1train)
f1_test_errors.append(f1test)
x = []
y = []
z1 = []
z2 = []
for pt in points:
index = points.index(pt)
x.append(pt[0])
y.append(pt[1])
z1.append(f1_train_errors[index])
z2.append(f1_test_errors[index])
z = None
if z_to_show == 0:
z = z1
else:
z = z2
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1)
cmap = matplotlib.cm.get_cmap("jet")
pts = ax.scatter(x, y, s=60, c=z, cmap=cmap,
linewidth=0)
cbar = fig.colorbar(pts, ax=ax)
fig.axes[-1].set_ylabel("Z")
ax.grid()
ax.set_xlabel("Número de estimadores")
ax.set_ylabel("Profundidad máxima")
ax.set_xlim(0, upper_estimator+1)
ax.set_ylim(0, upper_depth+1)
if z_to_show == 0:
ax.set_title("F1 train score")
else:
ax.set_title("F1 test score")
for i, txt in enumerate(z):
ax.annotate("{0:.2f}".format(txt), (x[i],y[i]))
plt.show()
estimator_params = np.arange(1, 100, 10)
depth_params = np.arange(1, 20, 1)
graph_ada_boost_range(estimator_params, depth_params, 0, 100, 20)
graph_ada_boost_range(estimator_params, depth_params, 1, 100, 20)
from sklearn.svm import SVC as SVM
from sklearn.metrics import accuracy_score
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
def train_svm(param, Xtrain, Ytrain, Xtest, Ytest):
    # Train the SVM
model= SVM()
model.set_params(C=param, kernel='rbf')
model.fit(Xtrain, Ytrain)
Ytrain_pred = model.predict(Xtrain)
Ytest_pred = model.predict(Xtest)
train_error = (1-accuracy_score(Ytrain, Ytrain_pred))
test_error = (1-accuracy_score(Ytest, Ytest_pred))
train_f1 = f1_score(Ytrain, Ytrain_pred)
    test_f1 = f1_score(Ytest, Ytest_pred)
    return (train_error, test_error, train_f1, test_f1)
def graph_svm_range(params):
train_errors = []
test_errors = []
f1_train_errors = []
f1_test_errors = []
    for c_value in params:
        (train, test, f1train, f1test) = train_svm(c_value, Xtrain, Ytrain, Xtest, Ytest)
train_errors.append(train)
test_errors.append(test)
f1_train_errors.append(f1train)
f1_test_errors.append(f1test)
plt.figure(figsize=(10, 8))
plt.plot(params, f1_train_errors, label="Train F1 Score")
plt.plot(params, f1_test_errors, label="Test F1 Score")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.xlabel('C (RBF kernel penalty parameter)')
    plt.ylabel('F1 Score')
plt.show()
params = np.arange(0.001, 1.0, 0.03)
graph_svm_range(params)
print(train_tree(16, Xtrain, Ytrain, Xvalidation, Yvalidation)[3])
print(train_random_forest(60, 16, Xtrain, Ytrain, Xvalidation, Yvalidation)[3])
print(train_ada_boost(60, 16, Xtrain, Ytrain, Xvalidation, Yvalidation)[3])
print(train_svm(0.3, Xtrain, Ytrain, Xvalidation, Yvalidation)[3])
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
model = RandomForestClassifier(n_estimators=10, max_depth=18,n_jobs=-1)
model.fit(Xtrain, Ytrain)
importances = model.feature_importances_
feature_cols = Xtrain.columns.tolist()
joined = []
for i in range(0, len(feature_cols)):
    joined.append((feature_cols[i], importances[i]))
importance = pd.DataFrame(joined)
importance.sort_values(by=1, ascending=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The input variables are based on physicochemical tests of the wines and correspond to
Step2: A large number of outliers can be seen in the dataset. They are needed so that the machines learn how extreme variable values affect wine quality, and therefore they will not be removed
Step3: Classification tree
Step4: "Zooming in" on small depth values, we have
Step5: Once the tree reaches a depth of roughly 20, the training error tends to 0 and the test error shows no major changes. To avoid extra computation, it is best to use this depth.
Step6: The highest score on the test set is 0.88 and occurs with around 90 estimators and a depth greater than 15.
Step7: Unlike Random Forest, a smaller number of estimators is needed to reach the maximum test score, and the trend holds that it is the tree depth (that is, the hyperparameters of the base algorithms used in the ensemble) that has the greatest impact on the quality of the learning
Step8: The SVM reaches a test score close to 0.795 and then shows a test score higher than the training score, which can be regarded as overfitting. Its accuracy is much lower than the score obtained by the ensembles. In general, ensembles will outperform classical algorithms as long as the base algorithms used in the ensemble work well on the selected dataset.
Step9: Thus, the best algorithm is RandomForest
|
12,544
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.zeros(9).reshape(3,3) # 3 x 3 matrix filled with zeros
print(a)
print(a.dtype)
a1 = np.zeros((3, 3)) # same matrix, note the tuple parameter
b = np.random.rand(6) # random numbers between 0-1
print(b)
a1 = np.full((3, 4), 8)
print(a1)
i = np.eye(4)
print(i)
c = np.array([[1, 2, 3], [2, 4, 6]], dtype=np.int32) # default data type is float64
print(c.shape) # return a tuple
print(c.size)
d = np.arange(10) # integer values from 0 to 9
print(d)
e = np.arange(2, 11, 2) # even numbers from 2 to 10
print(e)
f = np.arange(0.1, 1, 0.1) # for float numbers
print(f)
f1 = np.linspace(0.1, 0.9, 9) # same as above but start, end, number of items
print(f1)
t1 = np.arange(80).reshape(10, 8)
print(t1)
print(t1[0, 0]) # first row, first column
print(t1[0][0]) # same as above
print(t1[2]) # third row
print(t1[:,1]) # second column
print(t1[::2]) # odd rows (every second)
print(t1[t1 % 3 == 0]) # elements divisible by three
a1 = np.full((3, 4), 8)
a2 = np.arange(12).reshape(3, 4)
print(a1 * 2) # scalar times matrix
print(np.sqrt(a2)) # square root of all elements
print(a1 - a2) # difference of two matrices
print(a1 * a2) # element wise multiplication!!!
b1 = np.arange(12).reshape(4, 3)
print(b1.transpose().dot(b1)) # matrix multiplication with tranpose
print(b1.T.dot(b1)) # same as above
print(np.linalg.inv(b1.T.dot(b1))) # matrix inverse
import matplotlib.pyplot as plt
from math import sqrt
pnts = np.array([[1.1, 0.4], [2.6, 1.9], [4.2, 3.0], [7.0, 3.1], [8.2, 2.4], [9.6, 1.2]])
plt.plot(pnts[:,0], pnts[:,1], "o")
c = np.polyfit(pnts[:,0], pnts[:,1], 2) # parabola fitting
v = np.polyval(c, pnts[:,0]) - pnts[:,1] # corrections for y coordinates
rms = sqrt(np.sum(v**2) / pnts.shape[0]) # RMS error
print(c)
x = np.linspace(np.min(pnts[:,0]), np.max(pnts[:,0]), 100)
plt.plot(x, np.polyval(c, x))
plt.plot(pnts[:,0], pnts[:,1], "o")
A = np.array([[3, 4, 2], [-1, 1, 3], [3, -4, 1]])
b = np.array([21, -6, -7])
x = np.linalg.solve(A, b)
print(x)
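# Added sanity check (not in the original): the solution should reproduce b.
print(np.allclose(A.dot(x), b))  # expected: True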
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating and filling arrays
Step2: Vector filled with random number.
Step3: Matrix filled with constant.
Step4: Identity matrix.
Step5: Create matrix from list and set data type.
Step6: Dimensions and size of a matrix.
Step7: Fill matrix with a series of numbers.
Step8: Accessing elements of an array
Step9: Operations with matrices
Step10: Matrix operations
Step11: Practical examples
Step12: Linear equation system
|
12,545
|
<ASSISTANT_TASK:>
Python Code:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP' / 'Tutorial_11')
if not os.path.exists(testfolder):
os.makedirs(testfolder)
print ("Your simulation will be stored in %s" % testfolder)
from bifacial_radiance import *
import numpy as np
import pandas as pd
simulationname = 'tutorial_11'
#Location:
lat = 40.0583 # NJ
lon = -74.4057 # NJ
# MakeModule Parameters
moduletype='test-module'
numpanels = 3 # AgriPV site has 3 modules along the y direction (N-S since we are facing it to the south) .
x = 0.95
y = 1.95
xgap = 2.0  # Leaving 2 meters between modules in the x direction
ygap = 0.10 # Leaving 10 centimeters between modules on y direction
zgap = 0 # no gap to torquetube.
sensorsy = 6*numpanels # this will give 6 sensors per module, 1 per cell
# Other default values:
# TorqueTube Parameters
axisofrotationTorqueTube=False # this is False by default if there is no torquetbue parameters
torqueTube = False
cellLevelModule = True
numcellsx = 12
numcellsy = 6
xcell = 0.156
ycell = 0.156
xcellgap = 0.02
ycellgap = 0.02
cellLevelModuleParams = {'numcellsx': numcellsx, 'numcellsy':numcellsy,
'xcell': xcell, 'ycell': ycell, 'xcellgap': xcellgap, 'ycellgap': ycellgap}
# SceneDict Parameters
pitch = 15 # m
albedo = 0.2 #'grass' # ground albedo
hub_height = 4.3 # m
nMods = 6 # six modules per row.
nRows = 3 # 3 row
azimuth_ang=180 # Facing south
tilt =35 # tilt.
# Now let's run the example
demo = RadianceObj(simulationname,path = testfolder)
demo.setGround(albedo)
epwfile = demo.getEPW(lat, lon) # NJ lat/lon 40.0583° N, 74.4057
metdata = demo.readWeatherFile(epwfile, coerce_year=2001)
timestamp = metdata.datetime.index(pd.to_datetime('2001-06-17 13:0:0 -5')) # Make this timezone aware, use -5 for EST.
demo.gendaylit(timestamp)
# Making module with all the variables
module=demo.makeModule(name=moduletype,x=x,y=y,numpanels=numpanels,
xgap=xgap, ygap=ygap, cellModule=cellLevelModuleParams)
# create a scene with all the variables
sceneDict = {'tilt':tilt,'pitch': 15,'hub_height':hub_height,'azimuth':azimuth_ang, 'nMods': nMods, 'nRows': nRows}
scene = demo.makeScene(module=moduletype, sceneDict=sceneDict)
octfile = demo.makeOct(demo.getfilelist())
## Comment the ! line below to run rvu from the Jupyter notebook instead of your terminal.
## Simulation will stop until you close the rvu window
#!rvu -vf views\front.vp -e .01 tutorial_11.oct
torquetubelength = module.scenex*(nMods)
name='Post1'
text='! genbox Metal_Aluminum_Anodized torquetube_row1 {} 0.2 0.3 | xform -t {} -0.1 -0.3 | xform -t 0 0 4.2'.format(
torquetubelength, (-torquetubelength+module.sceney)/2.0)
customObject = demo.makeCustomObject(name,text)
demo.appendtoScene(radfile=scene.radfiles, customObject=customObject, text="!xform -rz 0")
name='Post2'
text='! genbox Metal_Aluminum_Anodized torquetube_row2 {} 0.2 0.3 | xform -t {} -0.1 -0.3 | xform -t 0 15 4.2'.format(
torquetubelength, (-torquetubelength+module.sceney)/2.0)
customObject = demo.makeCustomObject(name,text)
demo.appendtoScene(radfile=scene.radfiles, customObject=customObject, text="!xform -rz 0")
name='Post3'
text='! genbox Metal_Aluminum_Anodized torquetube_row2 {} 0.2 0.3 | xform -t {} -0.1 -0.3 | xform -t 0 -15 4.2'.format(
torquetubelength, (-torquetubelength+module.sceney)/2.0)
customObject = demo.makeCustomObject(name,text)
demo.appendtoScene(radfile=scene.radfiles, customObject=customObject, text="!xform -rz 0")
name='Pile'
pile1x = (torquetubelength+module.sceney)/2.0
pilesep = pile1x*2.0/7.0
text= '! genrev Metal_Grey tube1row1 t*4.2 0.15 32 | xform -t {} 0 0'.format(pile1x)
text += '\r\n! genrev Metal_Grey tube1row2 t*4.2 0.15 32 | xform -t {} 15 0'.format(pile1x)
text += '\r\n! genrev Metal_Grey tube1row3 t*4.2 0.15 32 | xform -t {} -15 0'.format(pile1x)
for i in range (1, 7):
text += '\r\n! genrev Metal_Grey tube{}row1 t*4.2 0.15 32 | xform -t {} 0 0'.format(i+1, pile1x-pilesep*i)
text += '\r\n! genrev Metal_Grey tube{}row2 t*4.2 0.15 32 | xform -t {} 15 0'.format(i+1, pile1x-pilesep*i)
text += '\r\n! genrev Metal_Grey tube{}row3 t*4.2 0.15 32 | xform -t {} -15 0'.format(i+1, pile1x-pilesep*i)
customObject = demo.makeCustomObject(name,text)
demo.appendtoScene(radfile=scene.radfiles, customObject=customObject, text="!xform -rz 0")
octfile = demo.makeOct() # makeOct combines all of the ground, sky and object files we just added into a .oct file.
## Comment the ! line below to run rvu from the Jupyter notebook instead of your terminal.
## Simulation will stop until you close the rvu window
#!rvu -vf views\front.vp -e .01 tutorial_11.oct
analysis = AnalysisObj(octfile, demo.name)
sensorsy = 20
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy=sensorsy)
groundscan = frontscan
groundscan['zstart'] = 0.05 # setting it 5 cm from the ground.
groundscan['zinc'] = 0 # no tilt necessary.
groundscan['yinc'] = pitch/(sensorsy-1) # increasing spacing so it covers all distance between rows
groundscan
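# Note (added for clarity): groundscan reuses the front-scan dict but redirects the
# sensor line onto the ground plane: zstart fixes the sensor height, zinc=0 keeps the
# line level, and yinc spreads the sensors across the full row-to-row pitch.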
analysis.analysis(octfile, simulationname+"_groundscan", groundscan, backscan) # compare the back vs front irradiance
import seaborn as sns
sensorsx = 20
startgroundsample=-module.scenex
spacingbetweensamples = module.scenex/(sensorsx-1)
for i in range (0, sensorsx): # Will map 20 points
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy=sensorsy)
groundscan = frontscan
groundscan['zstart'] = 0.05 # setting it 5 cm from the ground.
groundscan['zinc'] = 0 # no tilt necessary.
groundscan['yinc'] = pitch/(sensorsy-1) # increasing spacing so it covers all distance between rows
groundscan['xstart'] = startgroundsample + i*spacingbetweensamples # increasing spacing so it covers all distance between rows
analysis.analysis(octfile, simulationname+"_groundscan_"+str(i), groundscan, backscan) # compare the back vs front irradiance
filestarter = "irr_AgriPV_groundscan_"
filelist = sorted(os.listdir(os.path.join(testfolder, 'results')))
prefixed = [filename for filename in filelist if filename.startswith(filestarter)]
arrayWm2Front = []
arrayWm2Back = []
arrayMatFront = []
arrayMatBack = []
filenamed = []
faillist = []
print('{} files in the directory'.format(filelist.__len__()))
print('{} groundscan files in the directory'.format(prefixed.__len__()))
i = 0 # counter to track # files loaded.
for i in range (0, len(prefixed)):
ind = prefixed[i].split('_')
try:
resultsDF = load.read1Result(os.path.join(testfolder, 'results', prefixed[i]))
arrayWm2Front.append(list(resultsDF['Wm2Front']))
arrayWm2Back.append(list(resultsDF['Wm2Back']))
arrayMatFront.append(list(resultsDF['mattype']))
arrayMatBack.append(list(resultsDF['rearMat']))
filenamed.append(prefixed[i])
except:
print(" FAILED ", i, prefixed[i])
faillist.append(prefixed[i])
resultsdf = pd.DataFrame(list(zip(arrayWm2Front, arrayWm2Back,
arrayMatFront, arrayMatBack)),
columns = ['br_Wm2Front', 'br_Wm2Back',
'br_MatFront', 'br_MatBack'])
resultsdf['filename'] = filenamed
df3 = pd.DataFrame(resultsdf['br_Wm2Front'].to_list())
reversed_df = df3.T.iloc[::-1]
sns.set(rc={'figure.figsize':(11.7,8.27)})
# Plot
ax = sns.heatmap(reversed_df)
ax.set_yticks([])
ax.set_xticks([])
ax.set_ylabel('')
ax.set_xlabel('')
print('')
name='Center_Grass'
carpositionx=-2
carpositiony=-1
text='! genbox white_EPDM CenterPatch 28 12 0.1 | xform -t -14 2 0'
customObject = demo.makeCustomObject(name,text)
demo.appendtoScene(scene.radfiles, customObject, '!xform -rz 0')
octfile = demo.makeOct(demo.getfilelist())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If desired, you can view the Oct file at this point
Step2: And adjust the view parameters, you should see this image.
Step3: View the geometry with the posts on
Step4: <a id='step2'></a>
Step5: This is the result for only one 'chord' accross the ground. Let's now do a X-Y scan of the ground.
Step6: Read all the files generated into one dataframe
Step7: Creating a new dataframe where each element in the front irradiance list is a column. Also transpose and reverse so it looks like a top-down view of the ground.
Step8: <a id='step4'></a>
|
12,546
|
<ASSISTANT_TASK:>
Python Code:
import sympy
from sympy import *
from sympy.abc import x, n, z, t, k
init_printing() # for nice printing, a-la' TeX
%run "sums.py"
# duplicated code, put it into "sums.py"
def expand_sum_in_eq(eq_term):
lhs, rhs = eq_term.lhs, eq_term.rhs
return Eq(lhs, expand_Sum(rhs))
f = IndexedBase('f')
fibs = {f[i]:fibonacci(i) for i in range(100)}
transforming_matrix = Matrix([
[1,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0],
[-1,0,1,1,0,0,0,0,0],
[1,0,0,1,1,0,0,0,0],
[-1,0,0,0,1,1,0,0,0],
[1,0,0,0,0,1,1,0,0],
[-1,0,0,0,0,0,1,1,0],
[1,0,0,0,0,0,0,1,1]])
transforming_matrix
transforming_matrix**(-1)
def gf(t): return t + 1/(1+t)
gf(t).series(n=10)
def h(t): return t*(1+2*t+t**2)/(1+t+t**2)
(gf(t)*h(t)**2).series(n=10)
pascal_matrix = Matrix([
[1,0,0,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0,0],
[1,2,1,0,0,0,0,0,0],
[1,3,3,1,0,0,0,0,0],
[1,4,6,4,1,0,0,0,0],
[1,5,10,10,5,1,0,0,0],
[1,6,15,20,15,6,1,0,0],
[1,7,21,35,35,21,7,1,0],
[1,8,28,56,70,56,28,8,1]])
pascal_matrix
catalan_matrix = Matrix([
[1,0,0,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0,0],
[2,2,1,0,0,0,0,0,0],
[5,5,3,1,0,0,0,0,0],
[14,14,9,4,1,0,0,0,0],
[42,42,28,14,5,1,0,0,0],
[132,132,90,48,20,6,1,0,0],
[429,429,297,165,75,27,7,1,0],
[1430,1430,1001,572,275,110,35,8,1]])
catalan_matrix
catalan_inverse_matrix = Matrix([
[1,0,0,0,0,0,0,0,0],
[1,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0],
[0,-1,1,0,0,0,0,0,0],
[0,0,-2,1,0,0,0,0,0],
[0,0,1,-3,1,0,0,0,0],
[0,0,0,3,-4,1,0,0,0],
[0,0,0,-1,6,-5,1,0,0],
[0,0,0,0,-4,10,-6,1,0]])
catalan_inverse_matrix
odd_transformed_matrix = pascal_matrix * transforming_matrix
odd_transformed_matrix
transforming_matrix * pascal_matrix
(catalan_matrix**(-1) )*odd_transformed_matrix
catalan_inverse_matrix * odd_transformed_matrix
fib_matrix = Matrix([fibonacci(i) for i in range(9)])
fib_matrix_sym = Matrix([f[i] for i in range(9)])
fib_matrix, fib_matrix_sym
a = Wild('a')
std_prod = transforming_matrix * fib_matrix
sym_prod = transforming_matrix * fib_matrix_sym
std_prod, sym_prod, sym_prod.subs({f[0]:0}), sym_prod.subs({f[0]:0}).replace(f[a]+f[a+1],f[a+2])
rhs=pascal_matrix * transforming_matrix * fib_matrix
rhs_sym = pascal_matrix * sym_prod
rhs, rhs_sym
a_range = range(1,18,2)
lhs=Matrix([fibonacci(i) for i in a_range]) - Matrix([1 for i in range(9)])
lhs_sym=Matrix([f[i] for i in a_range]) - Matrix([1 for i in range(9)])
lhs, lhs_sym
Eq(lhs,rhs)
eq_sym = Eq(lhs_sym, rhs_sym)
eq_sym
f_minus1_vector = Matrix([f[-1] for i in range(9)])
one_plus_eq = eq_sym  # .subs(-Integer(1), f[-1])
one_plus_eq = Eq(one_plus_eq.lhs + f_minus1_vector, one_plus_eq.rhs + f_minus1_vector)
one_plus_eq = Eq(one_plus_eq.lhs.subs(f[-1],1), one_plus_eq.rhs)
Eq(one_plus_eq.lhs, one_plus_eq.rhs.subs(fibs))
fib0_term = f[0]
eq_sym.subs(fib0_term, fibs[fib0_term])
gen_odd_fibs = Eq(f[2*n+1]-1, Sum(binomial(n+1, k+1)*f[k], (k,1,n)))
Eq(gen_odd_fibs, Sum(binomial(n+1, n-k)*f[k], (k,1,n)))
expand_sum_in_eq(gen_odd_fibs.subs(n, 8))
eq_sym.subs(fibs)
eq_17 = Eq(f[17],f[-1] + rhs_sym[-1])
eq_18_shift = Eq(f[n], f[n-18]+8*f[n-17]+36*f[n-16]+84*f[n-15]+126*f[n-14]+126*f[n-13]+84*f[n-12]+36*f[n-11]+9*f[n-10]+f[n-9])
eq_17, eq_18_shift
[eq_18_shift.subs(n,i).lhs.subs(fibs) - eq_18_shift.subs(n,i).rhs.subs(fibs) for i in range(18,32)]
from itertools import accumulate
to_accumulate = rhs_sym + ones(9,1)*f[-1]
even_rhs = Matrix(list(accumulate(to_accumulate, lambda folded, current_row: Add(folded, current_row) )))
even_lhs = Matrix([f[i] for i in range(2,19,2)])
even_fibs_matrix_eq = Eq(even_lhs, even_rhs)
even_fibs_matrix_eq
even_transformed_matrix = Matrix([
[1,0,0,0,0,0,0,0,0],
[2,1,0,0,0,0,0,0,0],
[4,4,1,0,0,0,0,0,0],
[7,10,5,1,0,0,0,0,0],
[11,20,15,6,1,0,0,0,0],
[16,35,35,21,7,1,0,0,0],
[22,56,70,56,28,8,1,0,0],
[29,84,126,126,84,36,9,1,0],
[37,120,210,252,210,120,45,10,1]])
even_transformed_matrix
even_transforming_matrix = (pascal_matrix**(-1))*even_transformed_matrix
even_transforming_matrix
(catalan_matrix**(-1) )*even_transformed_matrix
catalan_inverse_matrix * even_transformed_matrix
even_transforming_matrix * fib_matrix_sym
even_vector_eq_sym = Eq(even_lhs - Matrix(list(range(1,10))),
pascal_matrix * even_transforming_matrix * fib_matrix_sym)
even_vector_eq_sym
even_vector_eq_sym.subs(fib0_term, fibs[fib0_term])
gen_even_fibs = Eq(f[2*n]-n, Sum(binomial(n+1, k+2)*f[k], (k,1,n-1)))
Eq(gen_even_fibs, Sum(binomial(n+1, n-k-1)*f[k], (k,1,n-1)))
expand_sum_in_eq(gen_even_fibs.subs(n, 9))
even_fibs_matrix_eq_minus1_appear = even_fibs_matrix_eq.subs(fibs)
Eq(even_fibs_matrix_eq.lhs, even_fibs_matrix_eq_minus1_appear, evaluate=False)
list(accumulate([fibonacci(2*i+1)-1 for i in range(21)]))
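# Cross-check (added, based on the standard identity sum_{i=0}^{n} F(2i+1) = F(2n+2)):
# each partial sum above should equal F(2n+2) - (n+1).
assert all(s == fibonacci(2*(m + 1)) - (m + 1)
           for m, s in enumerate(accumulate(fibonacci(2*i + 1) - 1 for i in range(21))))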
def n_gf(t): return t/(1-t)**2
n_gf(t).series(n=20)
def odd_fib_gf(t): return t**2/((1-t)**2*(1-3*t+t**2))
odd_fib_gf(t).series(n=20)
composite_odd_fibs_gf = n_gf(t)+odd_fib_gf(t)
composite_odd_fibs_gf.factor(), composite_odd_fibs_gf.series(n=20)
def odd_integers_gf(t): return ((n_gf(t)+n_gf(-t))/2).simplify()
odd_integers_gf(t).series(n=20)
# here is the error: we should use the generating function of F(2n+1) instead of F(n) as done here!
def fib_gf(t): return t/(1-t-t**2)
fib_gf(odd_integers_gf(t)).series(n=20)
def even_fibs_gf(t): return n_gf(t) + fib_gf(t)/(1-t)
even_fibs_gf(t).series(n=10)
even_fibs_matrix_eq_minus1_appear.subs(f[-1],1)
eq_17 = Eq(f[n], 8*f[n-17]+29*f[n-16]+84*f[n-15]+126*f[n-14]+126*f[n-13]+84*f[n-12]+36*f[n-11]+9*f[n-10]+f[n-9])
eq_17
[eq_17.subs(n,i).lhs.subs(fibs) - eq_17.subs(n,i).rhs.subs(fibs) for i in range(17,31)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Since the coefficients in the triangle on the rhs are part of the Pascal triangle, namely A104712, the following is a generalization
Step2: again, Fibonacci numbers, A000045.
Step3: Since the coefficients in the triangle on the rhs are part of the Pascal triangle, namely A104713, the following is a generalization
Step4: the summands on the rhs form a known sequence, A054452.
|
12,547
|
<ASSISTANT_TASK:>
Python Code:
x = [1,2,3]
lambda x: max(x)
a = range(-5,5)
## with builtins
b = map(abs,a)
c = [abs(x) for x in a]
print b==c,b
## with your own function
b = map(lambda x: x**2, a)
c = [x**2 for x in a]
print b==c,b
import types
## filter
a = ['', 'fee', '', '', '', 'fi', '', '', '', '', 'foo', '', '', '', '', '', 'fum']
b = filter(lambda x: len(x) > 0,a)
c = [x for x in a if len(x) > 0]
print b==c,b
## square only the ints and filter the rest
a = [1, '4', 9, 'a', 0, 4]
b = [ x**2 for x in a if type(x)==types.IntType ]
c = map(lambda x: x**2, filter(lambda x: isinstance(x,int) == True,a))
print b==c,b
a = [1, 2, 3, 4]
[elem*2 for elem in
 [item+1 for item in a] ]  # evaluates to [4, 6, 8, 10]
a1,a2 = [1,2,3],['a','b','c']
print zip(a1,a2)
print zip(*[a1,a2])
dict(zip(a2,a1))
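# The expression above builds {'a': 1, 'b': 2, 'c': 3}: zip pairs each key with its value.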
## transpose
a = [[1,2,3],[4,5,6]]
print(a)
b = map(list, zip(*a))
c = [[row[i] for row in a] for i in range(len(a[0]))]
print b==c,b
## rotate (to the right 90 degrees)
b = map(list, zip(*a[::-1]))
c = [[row[i] for row in a[::-1]] for i in range(len(a[0]))]
print b==c,b
## Are list comprehensions always easier?
b = map(lambda x: max(x), zip(*a))
## what is the equivalent list comprehension?
c =[max(tpl) for tpl in [[row[i] for row in a] for i in range(len(a[0]))]]
print b==c,b
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple list comprehensions
Step2: Filtering list comprehensions
Step3: Nested list comprehensions
Step4: Having fun with Zip
|
12,548
|
<ASSISTANT_TASK:>
Python Code:
def greetings(f):
print(f)
def wrapper(*args, **kwargs):
print("dekorator foo mówi: ", "Hello world!")
return f(*args, **kwargs)
return wrapper
@greetings
def foo(a, b):
print(a, b)
print("Foo function")
def bar():
pass
foo(4, 5)
#foo(1, 2)
#print(foo)
#print(bar)
def greetings(a, b, *args, **kwargs):
print("parametry funkcji tworzącej dekorator: ", a, b, args, kwargs)
def wrapper(f):
def inner_wrapper(*args, **kwargs):
print("dekorator foo mówi: ", "Hello world!")
print("parametry przekazane do foo: ", args, kwargs)
return f(*args, **kwargs)
return inner_wrapper
return wrapper
@greetings(1, 2, 3, liczba10=10, liczba20=20)
def foo(a, b):
print(a, b)
print("Foo function")
def bar():
pass
foo(4, 5)
#print(foo)
#print(bar)
def greetings(f):
def wrapper(*args, **kwargs):
print("greetings")
f(*args, **kwargs)
return wrapper
def hello(f):
def wrapper(*args, **kwargs):
print("hello world")
f(*args, **kwargs)
return wrapper
@greetings
@hello
def foo():
print("foo")
foo()
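# Note (added): decorators apply bottom-up, so foo here is equivalent to
# greetings(hello(foo)); calling it prints "greetings", then "hello world", then "foo".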
def attrs(**kwargs):
def decorate(f):
for k in kwargs:
setattr(f, k, kwargs[k])
return f
return decorate
@attrs(version="1.0", author="Andrzej Krawczyk")
def foo():
print("Hi!")
print(foo.version)
print(foo.author)
print(foo())
from functools import wraps
def greetings(f):
@wraps(f)
def wrapper(*args, **kwargs):
print("Parametry przekazane do nowej funkcji: ", args, kwargs)
return f(*args, **kwargs)
return wrapper
@greetings
def foo(a, b):
    """Docstring of function foo"""
print(a, b)
print("Foo function")
foo(4, 5)
#print(foo.__wrapped__)
#print(foo)
#print(foo.__doc__)
class greetings():
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
print("pozdrowienia z dekoratora w formie klasy", self.f)
return self.f(*args, **kwargs)
@greetings
def foo(a, b):
print(a, b)
print("Foo function")
def bar():
pass
foo(4, 5)
print(foo)
print(bar)
from functools import wraps
class greetings():
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def __call__(self, f, *args, **kwargs):
@wraps(f)
def wrapper(*args, **kwargs):
print("pozdrowienia z dekoratora w formie klasy", f)
print(self.a, self.b, self.c)
return f(*args, **kwargs)
return wrapper
@greetings("ala", "ma", "kota")
def foo(a, b):
print("Foo function", a, b)
def bar():
pass
foo(4, 5)
print(foo)
print(bar)
from functools import singledispatch
@singledispatch
def suma(arg):
print("inicjalizacja sumy")
@suma.register(tuple)
def _(arg):
print("suma dla krotki", arg)
return arg[0] + arg[1]
@suma.register(list)
def _(arg):
print("suma dla listy", arg)
return sum(arg)
@suma.register(int)
@suma.register(str)
def _(arg):
print("dla int lub str", arg)
print("int: ", suma(1))
print("str: ", suma("1"))
print("krotka: ", suma((1, 2)))
print("lista: ", suma([1, 2, 3, 4]))
print(suma.dispatch(int))
print(suma.registry)
from functools import lru_cache
import requests
@lru_cache(maxsize=32, typed=False)
def get_web(url):
return requests.get(url=url)
print(get_web("http://www.trojmiasto.pl"))
print(get_web.cache_info())
from functools import partial
from operator import add
def foo(a, b, d=5, e=10):
print(a, b, d, e)
add_to_five = partial(add, 5)
print(add_to_five(2))
print(add_to_five(10))
p1 = partial(foo, d=-5)
p2 = partial(p1, 5)
p2(-3, e=-100)
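# Expected output (added note): "5 -3 -5 -100". The positional args fill a and b,
# while the partially applied d=-5 and the explicit e=-100 override the defaults.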
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h1><center>?</center></h1>
Step3: the functools module
Step4: singledispatch
Step5: lru_cache (least recently used)
Step6: partial
|
12,549
|
<ASSISTANT_TASK:>
Python Code:
print('Hello, world')
# This is a code cell
my_variable = 5
print(my_variable)
import hail as hl
from bokeh.io import output_notebook, show
hl.init()
output_notebook()
hl.utils.get_1kg('data/')
! ls -1 data/
hl.import_vcf('data/1kg.vcf.bgz').write('data/1kg.mt', overwrite=True)
mt = hl.read_matrix_table('data/1kg.mt')
mt.describe()
mt.count()
mt.s.show(5)
mt.locus.show(5)
hl.summarize_variants(mt)
mt.aggregate_rows(hl.agg.count_where(mt.alleles == ['A', 'T']))
snp_counts = mt.aggregate_rows(
hl.array(hl.agg.counter(mt.alleles)))
snp_counts
sorted(snp_counts,
key=lambda x: x[1])
! head data/1kg_annotations.txt
sa = hl.import_table('data/1kg_annotations.txt',
impute=True,
key='Sample')
sa.describe()
sa.show()
mt = mt.annotate_cols(pheno = sa[mt.s])
mt.describe()
mt = hl.sample_qc(mt)
mt.sample_qc.describe()
p = hl.plot.scatter(x=mt.sample_qc.r_het_hom_var,
y=mt.sample_qc.call_rate)
show(p)
mt = mt.filter_cols(mt.sample_qc.dp_stats.mean >= 4)
mt = mt.filter_cols(mt.sample_qc.call_rate >= 0.97)
# call rate before filtering
mt.aggregate_entries(hl.agg.fraction(hl.is_defined(mt.GT)))
ab = mt.AD[1] / hl.sum(mt.AD)
filter_condition_ab = (
hl.case()
.when(mt.GT.is_hom_ref(), ab <= 0.1)
.when(mt.GT.is_het(), (ab >= 0.25) & (ab <= 0.75))
.default(ab >= 0.9) # hom-var
)
mt = mt.filter_entries(filter_condition_ab)
# call rate after filtering
mt.aggregate_entries(hl.agg.fraction(hl.is_defined(mt.GT)))
mt = hl.variant_qc(mt)
mt.variant_qc.describe()
mt.variant_qc.AF.show()
mt = mt.filter_rows(hl.min(mt.variant_qc.AF) > 1e-6)
mt = mt.filter_rows(mt.variant_qc.p_value_hwe > 0.005)
# final variant and sample count
mt.count()
gwas = hl.linear_regression_rows(y=mt.pheno.CaffeineConsumption,
x=mt.GT.n_alt_alleles(),
covariates=[1.0])
gwas.describe()
p = hl.plot.manhattan(gwas.p_value)
show(p)
p = hl.plot.qq(gwas.p_value)
show(p)
pca_eigenvalues, pca_scores, pca_loadings = hl.hwe_normalized_pca(mt.GT, compute_loadings=True)
pca_eigenvalues
pca_scores.describe()
pca_scores.scores[0].show()
pca_loadings.describe()
mt = mt.annotate_cols(pca = pca_scores[mt.s])
p = hl.plot.scatter(mt.pca.scores[0],
mt.pca.scores[1],
label=mt.pheno.SuperPopulation)
show(p)
gwas = hl.linear_regression_rows(
y=mt.pheno.CaffeineConsumption,
x=mt.GT.n_alt_alleles(),
covariates=[1.0, mt.pheno.isFemale, mt.pca.scores[0], mt.pca.scores[1], mt.pca.scores[2]])
p = hl.plot.qq(gwas.p_value)
show(p)
p = hl.plot.manhattan(gwas.p_value)
show(p)
gene_ht = hl.import_table('data/ensembl_gene_annotations.txt', impute=True)
gene_ht.show()
gene_ht.count()
gene_ht = gene_ht.transmute(interval = hl.locus_interval(gene_ht['Chromosome'],
gene_ht['Gene start'],
gene_ht['Gene end'],
reference_genome='GRCh37'))
gene_ht = gene_ht.key_by('interval')
mt = mt.annotate_rows(gene_info = gene_ht[mt.locus])
mt.gene_info.show()
burden_mt = (
mt
.group_rows_by(gene = mt.gene_info['Gene name'])
.aggregate(n_variants = hl.agg.count_where(mt.GT.n_alt_alleles() > 0))
)
burden_mt.describe()
burden_results = hl.linear_regression_rows(
y=burden_mt.pheno.CaffeineConsumption,
x=burden_mt.n_variants,
covariates=[1.0,
burden_mt.pheno.isFemale,
burden_mt.pca.scores[0],
burden_mt.pca.scores[1],
burden_mt.pca.scores[2]])
burden_results.order_by(burden_results.p_value).show()
ht = hl.identity_by_descent(mt).cache()
ht.describe()
ht.filter(ht.ibd.PI_HAT > 0.20).show()
ht = hl.impute_sex(mt.GT).cache()
ht.show()
sim_mt = hl.balding_nichols_model(n_populations=3,
n_samples=1000,
n_variants=1000)
# simulate variant effects using spike-and-slab model
spike_prob = 0.2
sim_mt = sim_mt.annotate_rows(beta = hl.rand_bool(spike_prob) * hl.rand_norm(0, 1))
# compute risk scores from betas
sim_mt = sim_mt.annotate_cols(risk = hl.agg.sum(sim_mt.beta * sim_mt.GT.n_alt_alleles()) / sim_mt.count_rows())
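# Note (added): dividing by count_rows() turns the summed effect into a per-variant
# average, keeping simulated risk scores on a comparable scale regardless of n_variants.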
show(hl.plot.histogram(sim_mt.risk))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modes
Step2: This is a markdown cell, so even if something looks like code (as below), it won't get executed!
Step3: Common gotcha
Step4: Now we initialize Hail and set up Bokeh to display inline in the notebook.
Step5: Download public 1000 Genomes data
Step6: It is possible to call command-line utilities from Jupyter by prefixing a line with a !
Step7: Part 1
Step8: Read 1KG into Hail
Step9: What is a MatrixTable?
Step10: count
Step11: show
Step12: <font color="brightred"><strong>Exercise
Step13: Most of Hail's functionality is totally general-purpose!
Step14: Or if we had travel data
Step15: By sorting the result in Python, we can recover an interesting bit of biology...
Step16: <font color="brightred"><strong>Question
Step17: We can import it as a Hail Table with hl.import_table.
Step18: While we can see the names and types of fields in the logging messages, we can also describe and show this table
Step19: Add sample metadata into our 1KG MatrixTable
Step20: What's going on here?
Step21: <font color="brightred"><strong>Exercise
Step22: <font color="brightred"><strong>Exercise
Step23: Genotype QC
Step24: Variant QC
Step25: Remove rare sites
Step26: Remove sites far from Hardy-Weinberg equilbrium
Step27: Part 3
Step28: Two of the plots that analysts generally produce are a Manhattan plot and a Q-Q plot.
Step29: Confounded!
Step30: The eigenvalues reflect the amount of variance explained by each principal component
Step31: The scores are the principal components themselves, computed per sample.
Step32: The loadings are the contributions to each component for each variant.
Step33: We can annotate the principal components back onto mt
Step34: Principal components measure ancestry
Step35: <font color="brightred"><strong>Question
Step36: Part 4
Step37: Create an interval key
Step38: Annotate variants using these intervals
Step39: Aggregate genotypes per gene
Step40: What is burden_mt?
Step41: Sorry, no hl.plot.manhattan for genes!
Step42: <font color="brightred"><strong>Exercise
Step43: Infer sex from X-chromosome data
Step44: Simulate genetic data
|
12,550
|
<ASSISTANT_TASK:>
Python Code:
# python standard library
from fractions import Fraction
from functools import reduce  # needed on Python 3, where reduce is no longer a builtin
spam = 'offer is secret, click secret link, secret sports link'.split(',')
print(len(spam))
ham = 'play sports today, went play sports, secret sports event, sports is today, sports costs money'.split(',')
print(len(ham))
class MailBag(object):
    """A place to put spam or ham"""
def __init__(self, mail, other_mail, k=0):
        """
        :param:
        - `mail`: list of example mail
        - `other_mail`: mail not in this class (e.g. spam if this is ham)
        - `k`: Laplace smoothing constant
        """
self.mail = mail
self.other_mail = other_mail
self.k = k
self._bag = None
self._probability = None
self._vocabulary_size = None
self._sample_size = None
return
@property
def vocabulary_size(self):
        """:return: count of unique words in all examples"""
if self._vocabulary_size is None:
self._vocabulary_size = len(set(self.bag) | set(self.bag_boy(self.other_mail)))
return self._vocabulary_size
@property
def bag(self):
:return: list of words in `mail`
if self._bag is None:
self._bag = self.bag_boy(self.mail)
return self._bag
@property
def sample_size(self):
        """:return: count of mail in both spam and not spam"""
if self._sample_size is None:
self._sample_size = len(self.mail + self.other_mail)
return self._sample_size
@property
def probability(self):
        """:return: count of this mail/total sample size"""
if self._probability is None:
SPAM_AND_HAM = 2
self._probability = self.l_probability(len(self.mail),
len(self.mail) + len(self.other_mail),
SPAM_AND_HAM)
return self._probability
def bag_boy(self, lines):
        """
        :param:
        - `lines`: list of lines
        :return: list of words taken from the lines
        """
tokenized = (line.split() for line in lines)
bag = []
for tokens in tokenized:
for token in tokens:
bag.append(token)
return bag
def l_probability(self, event_size, sample_size, classes):
        """
        :param:
        - `event_size`: count of events of interest
        - `sample_size`: count of all events
        - `classes`: count of all classes of events
        :return: probability with Laplace Smoothing
        """
return Fraction(event_size + self.k,
sample_size + classes * self.k)
def p_message(self, message):
        """
        :param:
        - `message`: line of mail
        :return: p(message|this class)
        """
probabilities = (self.p_word(word) for word in message.split())
return reduce(lambda x, y: x * y, probabilities) * self.probability
def p_word(self, word):
        """
        :param:
        - `word`: string to check for
        :return: fraction of word occurrence in bag
        """
return self.l_probability(self.word_count(word), len(self.bag), self.vocabulary_size)
def word_count(self, word):
        """
        :param:
        - `word`: string to check for
        :return: number of times word appears in bag
        """
return sum((1 for token in self.bag if token == word))
class SpamDetector(object):
    """A Bayesian network spam detector"""
def __init__(self, spam, ham, k=0):
        """
        :param:
        - `spam`: list of example spam lines
        - `ham`: list of example ham lines
        - `k`: Laplace smoothing constant
        """
self.spam = MailBag(mail=spam, k=k, other_mail=ham)
self.ham = MailBag(mail=ham, k=k, other_mail=spam)
return
def p_spam_given_message(self, message):
        """
        :param:
        - `message`: line to check if it's spam
        :return: probability that it's spam
        """
p_message_given_spam = self.spam.p_message(message)
return p_message_given_spam/ (p_message_given_spam +
self.ham.p_message(message))
# leave this in the same cell so updating the class updates the instance
detector = SpamDetector(spam=spam, ham=ham)
l_detector = SpamDetector(spam=spam, ham=ham, k=1)
def bagger(mail):
    """
    converts list of lines into list of tokens
    :param:
    - `mail`: list of space-separated lines
    :return: list of words in `mail`
    """
mail_tokenized = (line.split() for line in mail)
mail_bag = []
for tokens in mail_tokenized:
for token in tokens:
mail_bag.append(token)
return mail_bag
spam_bag = bagger(spam)
ham_bag = bagger(ham)
def assert_equal(expected, actual, description):
assert expected == actual, \
"'{2}'\nExpected: {0}, Actual: {1}".format(expected, actual,
description)
vocabulary_list = set(spam_bag) | set(ham_bag)
vocabulary = len(set(spam_bag) | set(ham_bag))
assert_equal(spam_bag, detector.spam.bag, 'check spam bags')
assert_equal(ham_bag, detector.ham.bag, 'ham bags')
assert_equal(vocabulary, detector.spam.vocabulary_size, 'vocabulary size')
print(vocabulary)
mail_count = len(ham) + len(spam)
assert_equal(mail_count, detector.spam.sample_size, 'mail count')
p_spam = Fraction(len(spam), mail_count)
assert_equal(p_spam, Fraction(3, 8), 'p-spam known')
assert_equal(p_spam, detector.spam.probability, 'p-spam detector')
print(p_spam)
def word_count(bag, word):
    """
    count the number of times a word is in the bag
    :param:
    - `bag`: collection of words
    - `word`: word to count
    :return: number of times word appears in bag
    """
return sum((1 for token in bag if token == word))
def p_word(bag, word, k=0, sample_space=12):
    """
    fraction of times word appears in the bag
    :param:
    - `bag`: collection of words
    - `word`: word to count in bag
    - `k`: Laplace smoothing constant
    - `sample_space`: total number of words in vocabulary
    :return: Fraction of total bag that is word
    """
return Fraction(word_count(bag, word) + k, len(bag) + k * sample_space)
p_secret_given_spam = p_word(spam_bag, 'secret')
assert p_secret_given_spam == Fraction(3, 9)
assert_equal(p_secret_given_spam, detector.spam.p_word('secret'),
'secret given spam')
print(p_secret_given_spam)
p_secret_given_ham = p_word(ham_bag, 'secret')
assert p_secret_given_ham == Fraction(1, 15)
assert_equal(p_secret_given_ham, detector.ham.p_word('secret'), 'p(secret|ham)')
print(p_secret_given_ham)
%%latex
$p(spam|`sports') = \frac{p(`sports' | spam)p(spam)}{p(`sports')}$
p_sports_given_spam = p_word(spam_bag, 'sports')
assert p_sports_given_spam == Fraction(1, 9)
assert_equal(p_sports_given_spam, detector.spam.p_word('sports'),
'p(sports|spam)')
print(p_sports_given_spam)
p_sports_given_ham = p_word(ham_bag, 'sports')
expected = Fraction(1, 3)
assert p_sports_given_ham == expected
assert_equal(p_sports_given_ham, detector.ham.p_word('sports'),
'p(sports|ham)')
p_ham = Fraction(len(ham), mail_count)
assert_equal(p_ham, detector.ham.probability, 'p(ham)')
print(p_ham)
p_sports = Fraction(word_count(spam_bag, 'sports') + word_count(ham_bag, 'sports'), vocabulary)
print(p_sports)
p_spam_given_sports = (p_sports_given_spam * p_spam)/(p_sports_given_spam * p_spam + p_sports_given_ham * p_ham)
assert p_spam_given_sports == Fraction(3, 18)
assert_equal(p_spam_given_sports, detector.p_spam_given_message('sports'),
'p(spam|sports)')
print(p_spam_given_sports)
%%latex
$p(spam|message) = \frac{p(message|spam)p(spam}{p(message|spam)p(spam) + p(message|ham)p(ham)}$
%%latex
$p(spam|sis) = \frac{p(s|spam)p(i|spam)p(s|spam)p(spam)}{p(s|spam)p(i|spam)p(s|spam)p(spam) + p(s|ham)p(i|ham)p(s|ham)p(ham)}$
p_is_given_spam = p_word(spam_bag, 'is')
assert_equal(p_is_given_spam, detector.spam.p_word('is'), 'p(is|spam)')
p_is_given_ham = p_word(ham_bag, 'is')
assert_equal(p_is_given_ham, detector.ham.p_word('is'), 'p(is|ham)')
def p_message_given_class(message, bag, class_probability, k=0, sample_space=12):
    """
    :param:
    - `message`: string of words
    - `bag`: bag of words
    - `class_probability`: probability for this class (e.g. p(spam))
    - `k`: Laplace smoothing constant
    - `sample_space`: size of the vocabulary
    :return: p(message|classification) * p(classification)
    """
probabilities = (p_word(bag, word, k=k, sample_space=sample_space) for word in message.split())
probability = class_probability
for p in probabilities:
probability *= p
return probability
def p_spam_given_message(message, k=0, sample_space=12):
    """
    :param:
    - `message`: string of words
    - `k`: Laplace smoothing constant
    - `sample_space`: total count of words in spam/ham bags
    :return: probability message is spam
    """
spam_probability = p_spam if k == 0 else lp_spam
ham_probability = p_ham if k == 0 else lp_ham
p_m_given_spam = p_message_given_class(message, spam_bag, spam_probability, k=k, sample_space=sample_space)
p_m_given_ham = p_message_given_class(message, ham_bag, ham_probability, k=k, sample_space=sample_space)
return p_m_given_spam/(p_m_given_spam + p_m_given_ham)
message = 'secret is secret'
expected = Fraction(25, 26)
p_sis_given_spam = (p_secret_given_spam * p_is_given_spam * p_secret_given_spam
* p_spam)
assert p_message_given_class(message, spam_bag, p_spam) == p_sis_given_spam
assert_equal(p_sis_given_spam, detector.spam.p_message(message), 'p(sis|spam)')
p_sis_given_ham = p_secret_given_ham * p_is_given_ham * p_secret_given_ham * p_ham
assert p_message_given_class(message, ham_bag, p_ham) == p_sis_given_ham
assert_equal(p_sis_given_ham, detector.ham.p_message(message), 'p(sis|ham)')
p_spam_given_sis = p_sis_given_spam / (p_sis_given_spam + p_sis_given_ham)
assert_equal(p_spam_given_sis, detector.p_spam_given_message(message), 'p(spam|sis)')
assert p_spam_given_message(message) == p_spam_given_sis
assert p_spam_given_sis == expected
print(p_spam_given_sis)
%%latex
$p(spam|tis) = \frac{p(t|spam)p(i|spam)p(s|spam)p(spam)}{p(t|spam)p(i|spam)p(s|spam)p(spam) + p(t|ham)p(i|ham)p(s|ham)p(ham)}$
tis = 'today is secret'
p_spam_given_tis = p_spam_given_message(tis)
print(p_spam_given_tis)
assert p_spam_given_tis == 0
assert_equal(p_spam_given_tis, detector.p_spam_given_message(tis),
'p(spam|tis)')
'today' in spam_bag
%%latex
$p(s) = \frac{s_{count} + k}{total_{count} + k * |classes|}$
def l_probability(class_count, total_count, k=1, classes=2):
:param:
- `class_count`: size of event space
- `total_count`: size of sample space
- `k`: constant to prevent 0 probability
- `classes`: total number of events
:return: probability of class_count with Laplace Smoothing
return Fraction(class_count + k, total_count + classes * k)
k = 1
# classes = spam, ham
number_of_classes = 2
messages = 1
spam_messages = 1
actual = Fraction(spam_messages + k, messages + number_of_classes * k)
assert actual == Fraction(2, 3)
print(actual)
messages, spam_messages = 10, 6
actual = l_probability(spam_messages, messages, k, number_of_classes)
expected = Fraction(spam_messages + k, messages + number_of_classes * k)
assert actual == expected
print(actual)
messages, spam_messages = 100, 60
print(l_probability(spam_messages, messages, k, number_of_classes))
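# Expected value (added note): (60 + 1) / (100 + 2) = 61/102. With larger samples the
# smoothed estimate approaches the raw frequency 60/100, since k matters less and less.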
lp_spam = l_probability(total_count=mail_count, class_count=len(spam))
assert_equal(lp_spam, l_detector.spam.probability, 'p(spam)')
lp_ham = l_probability(total_count=mail_count, class_count=len(ham))
assert_equal(lp_ham, l_detector.ham.probability, 'p(ham)')
print(lp_spam)
print(lp_ham)
print(p_word(spam_bag, 'today', k=1, sample_space=vocabulary))
lp_today_given_spam = l_probability(total_count=len(spam_bag),
class_count=word_count(spam_bag, 'today'),
classes=vocabulary)
assert_equal(lp_today_given_spam, l_detector.spam.p_word('today'), 'p(today|spam)')
lp_today_given_ham = l_probability(total_count=len(ham_bag),
class_count=word_count(ham_bag, 'today'),
classes=vocabulary
)
assert_equal(lp_today_given_ham, l_detector.ham.p_word('today'),
'p(today|ham)')
assert lp_today_given_spam == Fraction(1, 21)
assert lp_today_given_ham == Fraction(1, 9)
print('p(today|spam) = {0}'.format(lp_today_given_spam))
print('p(today|ham) = {0}'.format(lp_today_given_ham))
tis = 'today is secret'
lp_is_given_spam = p_word(spam_bag, 'is', k=1, sample_space=vocabulary)
assert_equal(lp_is_given_spam, l_detector.spam.p_word('is'), 'p(is|spam)')
lp_is_given_ham = p_word(ham_bag, 'is', k=1, sample_space=vocabulary)
assert_equal(lp_is_given_ham, l_detector.ham.p_word('is'), 'p(is|ham)')
lp_secret_given_spam = p_word(spam_bag, 'secret', k=1, sample_space=vocabulary)
assert_equal(lp_secret_given_spam, l_detector.spam.p_word('secret'), 'p(secret|spam)')
lp_secret_given_ham = p_word(ham_bag, 'secret', k=1, sample_space=vocabulary)
assert_equal(lp_secret_given_ham, l_detector.ham.p_word('secret'), 'p(secret|ham)')
lp_tis_given_spam = lp_today_given_spam * lp_is_given_spam * lp_secret_given_spam * lp_spam
lp_tis_given_ham = lp_today_given_ham * lp_is_given_ham * lp_secret_given_ham * lp_ham
lp_spam_given_tis = Fraction(lp_tis_given_spam, lp_tis_given_spam + lp_tis_given_ham)
assert_equal(lp_tis_given_spam, l_detector.spam.p_message(tis), 'p(tis|spam)')
assert_equal(lp_tis_given_ham, l_detector.ham.p_message(tis), 'p(tis|ham)')
assert_equal(lp_spam_given_tis, l_detector.p_spam_given_message(tis), 'p(spam|tis)')
print(lp_spam_given_tis)
actual = p_message_given_class(tis, ham_bag, lp_ham, k=1, sample_space=vocabulary)
assert lp_tis_given_ham == actual, "Expected: {0} Actual: {1}".format(lp_tis_given_ham, actual)
actual = p_spam_given_message(message=tis, k=1, sample_space=vocabulary)
assert lp_spam_given_tis == actual , "Expected: {0} Actual: {1}".format(lp_spam_given_tis, actual)
spam_detector = SpamDetector(spam=spam, ham=ham, k=1)
message = 'today is secret'
answer = spam_detector.p_spam_given_message(message)
assert_equal(lp_spam_given_tis, answer, 'p(spam|"today is secret")')
print("p(spam|'today is secret') = {0}".format(answer))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step12: The terms have to be changed to be either all plural or all singular. In this case I changed 'sport' to 'sports' where needed.
Step16: SpamDetector
Step18: What is the size of the vocabulary?
Step19: what is the probability that a piece of mail is spam?
Step22: what is p('secret'| spam)?
Step23: what is p('secret'| ham)?
Step24: You get a message with one word - 'sports', what is p(spam|'sports')?
Step25: Given the message 'secret is secret', what is the probability that it is spam?
Step26: So, the question here is, how do you calculate the probabilities for the entire message instead of for a single word? The answer turns out to be to multiply the probability for each of the words together - so p('secret is secret'| spam) is the product p('secret'|spam) x p('is'|spam) x p('secret'|spam)
Step29: Where s = 'secret', i = 'is' and sis='secret is secret'.
Step30: What is the probability that "today is secret" is spam?
Step31: Since one of the words isn't in the spam bag of words, the numerator is going to be 0 (p('today'|spam) = 0) so the probability overall is 0.
Step33: let k = 1.
Step34: What if you have 10 messages and 6 are spam?
Step35: What if you have 100 messages and 60 are spam?
Step36: spam/ham with Laplace Smoothing
Step37: What are p('today'|spam) and p('today'|ham)?
Step38: What is p(spam|m) if m = 'today is secret' and k=1?
Step39: This is just more double-checking to make sure that the functions I originally wrote match the hand-calculated answers.
Step40: Re-do
|
12,551
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.array([1, 2, 3, 4])
print 'Vector:\n', a
b = np.array([1, 2, 3, 4, 5], dtype=float)
print 'Float vector:\n', b
c = np.array([True, False, True], dtype=bool)
print 'Boolean vector:\n', c
print 'Type of the boolean vector:\n', c.dtype
d = np.arange(start=10, stop=20, step=2) # the stop value is not included!
print 'Vector of numbers from 10 to 20 with step 2:\n', d
f = np.arange(start=0, stop=1, step=0.3, dtype=float)
print 'Float vector of numbers from 0 to 1 with step 0.3:\n', f
print c.ndim # number of dimensions
print c.shape # shape effectively gives the length of the vector
a = np.array([1, 2, 3])
b = np.array([6, 5, 4])
k = 2
print 'Vector a:', a
print 'Vector b:', b
print 'Number k:', k
print 'Sum of a and b:\n', a + b
print 'Difference of a and b:\n', a - b
print 'Element-wise product of a and b:\n', a * b
print 'Multiplying a vector by a number (done element-wise):\n', k * a
from numpy.linalg import norm
a = np.array([1, 2, -3])
print 'Vector a:', a
print 'L1 norm of vector a:\n', norm(a, ord=1)
a = np.array([1, 2, -3])
print 'Vector a:', a
print 'L2 norm of vector a:\n', norm(a, ord=2)
a = np.array([1, 2, -3])
b = np.array([-4, 3, 8])
print 'Vector a:', a
print 'Vector b:', b
print 'L1 distance between vectors a and b:\n', norm(a - b, ord=1)
print 'L2 distance between vectors a and b:\n', norm(a - b, ord=2)
from scipy.spatial.distance import cdist
a = np.array([6, 3, -5])
b = np.array([-1, 0, 7])
print 'Vector a:', a
print 'Its shape:', a.shape
print 'Vector b:', b
print 'Its shape:', b.shape
a = a.reshape((1, 3))
b = b.reshape((1, 3))
print 'After applying the reshape method:\n'
print 'Row vector a:', a
print 'Its shape:', a.shape
print 'Row vector b:', b
print 'Its shape:', b.shape
print 'Manhattan distance between a and b (via cdist):', cdist(a, b, metric='cityblock')
d = np.array([3, 0, 8, 9, -10])
print 'Vector d:', d
print 'Its shape:', d.shape
print 'Vector d with newaxis --> row vector:\n', d[np.newaxis, :]
print 'Resulting shape:', d[np.newaxis, :].shape
print 'Vector d with newaxis --> column vector:\n', d[:, np.newaxis]
print 'Resulting shape:', d[:, np.newaxis].shape
a = np.array([6, 3, -5])
b = np.array([-1, 0, 7])
print 'Euclidean distance between a and b (via cdist):', cdist(a[np.newaxis, :],
b[np.newaxis, :],
metric='euclidean')
a = np.array([0, 5, -1])
b = np.array([-4, 9, 3])
print 'Vector a:', a
print 'Vector b:', b
print 'Dot product of a and b (via the function):', np.dot(a, b)
print 'Dot product of a and b (via the method):', a.dot(b)
cos_angle = np.dot(a, b) / norm(a) / norm(b)
print 'Cosine of the angle between a and b:', cos_angle
print 'The angle itself:', np.arccos(cos_angle)
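# Added check: converting the angle to degrees makes the result easier to read.
print 'The angle in degrees:', np.degrees(np.arccos(cos_angle))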
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Creating vectors
Step2: The type of a vector's values can be found with numpy.ndarray.dtype
Step3: Another way to define a vector is the function numpy.arange(([start, ]stop, [step, ]...), which produces a sequence of numbers of the given type from the interval [start, stop) with step step
Step4: In essence, a vector in NumPy is a one-dimensional array, which matches the intuitive definition of a vector
Step5: Note
Step6: 3. Vector norms
Step7: The $\ell_{1}$ norm
Step8: The $\ell_{2}$ norm
Step9: For more detail on which other norms (including matrix norms) can be computed, see the documentation.
Step10: The distance between vectors can also be computed with the function scipy.spatial.distance.cdist(XA, XB, metric='euclidean', p=2, ...) from the SciPy module, which is intended for scientific and engineering computations.
Step11: scipy.spatial.distance.cdist(...) requires XA and XB to be at least two-dimensional. For this reason, to use this function we need to convert the vectors considered in this notebook into row vectors using the approaches discussed below.
Step12: Note that after applying this method the shape of the resulting row vectors equals shape. The next method performs the same transformation but does not change the shape of the original vector.
Step13: Importantly, np.newaxis adds an axis of length 1 to the shape (which is logical, since the number of elements must be preserved). Thus, the new axis should be inserted where a 1 is needed in the shape.
Step14: This function also makes it possible to compute pairwise distances between sets of vectors. For example, suppose we have a matrix of size $m_{A} \times n$. We can view it as a description of some $m_{A}$ observations in an $n$-dimensional space. Suppose there is also another similar matrix of size $m_{B} \times n$, with $m_{B}$ vectors in the same $n$-dimensional space. It is often necessary to compute the pairwise distances between the vectors of the first and second sets. In this case you can use the function scipy.spatial.distance.cdist(XA, XB, metric='euclidean', p=2, ...), passing the two matrices described above as XA and XB. The function returns a matrix of pairwise distances of size $m_{A} \times m_{B}$, where the element at position $[i, j]$ equals the distance between the $i$-th vector of the first set and the $j$-th vector of the second set.
Step15: The dot product in the space $\mathbb{R}^{n}$ for two vectors $x = (x_{1}, \dots, x_{n})$ and $y = (y_{1}, \dots, y_{n})$ is defined as
Step16: The length of a vector $x = (x_{1}, \dots, x_{n}) \in \mathbb{R}^{n}$ is defined as the square root of its dot product with itself, i.e., the length equals the Euclidean norm of the vector
|
12,552
|
<ASSISTANT_TASK:>
Python Code:
# @title Install dependencies
# @markdown Only execute if not already installed and running a cloud runtime
!pip install -q timesketch_api_client
# @title Import libraries
# @markdown This cell will import all the libraries needed for the running of this colab.
import altair as alt # For graphing.
import numpy as np # Never know when this will come in handy.
import pandas as pd # We will be using pandas quite heavily.
from timesketch_api_client import config
# @title Sketch Configuration
# @markdown Set the sketch ID for the rest of the notebook
Sketch_id = 1 #@param {type:"integer"}
# @title Timesketch connection
ts_client = config.get_client()
sketch = ts_client.get_sketch(Sketch_id)
#message: A string that will be used as the message string.
message = "" #@param {type:"string"}
#date: A string with the timestamp of the message.
# This should be in a human readable format, eg: "2020-09-03T22:52:21".
# if empty it will set to now
date = "2020-08-06T12:48:06.994188Z" #@param {type:"string"}
#timestamp_desc : Description of the timestamp.
timestamp_desc = "Test_description" #@param {type:"string"}
#attributes: A dict of extra attributes to add to the event.
attributes = {"a": "alpha", "o": "omega", "g": "gamma"}
#tags: A list of strings to include as tags.
tags = ["not", "important"]
sketch.add_event(message, date, timestamp_desc, attributes, tags)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fill in your event data
Step2: Attributes / Tags (optional)
|
12,553
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
data = pd.read_csv('../input/fifa-2018-match-statistics/FIFA 2018 Statistics.csv')
y = (data['Man of the Match'] == "Yes") # Convert from string "Yes"/"No" to binary
feature_names = [i for i in data.columns if data[i].dtype in [np.int64]]
X = data[feature_names]
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
my_model = RandomForestClassifier(n_estimators=100,
random_state=0).fit(train_X, train_y)
import eli5
from eli5.sklearn import PermutationImportance
perm = PermutationImportance(my_model, random_state=1).fit(val_X, val_y)
eli5.show_weights(perm, feature_names = val_X.columns.tolist())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here is how to calculate and show importances with the eli5 library
|
12,554
|
<ASSISTANT_TASK:>
Python Code:
# In order to run this code, you need an already trained model (see the accompanying notebook)
import graphlab as gl
model = gl.load_model('pattern_mining_model.gl')
model
def predict(x):
# Construct an SFrame
sf = gl.SFrame(x)
# Add your own business logic here
# Call the predict method on the model.
predictions = model.predict(sf)
return predictions['prediction']
import graphlab as gl
# Replace with your path.
ps_state_path = 's3://<your-bucket-name>/predictive_service/ps'
# Set your AWS credentials.
gl.aws.set_credentials(<key>, <secret>)
# Create an EC2 config
ec2_config = gl.deploy.Ec2Config()
# Launch a predictive service
ps = gl.deploy.predictive_service.create(name = 'sklearn-predictive-service',
ec2_config = ec2_config, state_path = ps_state_path, num_hosts = 1)
import graphlab as gl
ps = gl.deploy.predictive_service.load('s3://gl-demo-usw2/predictive_service/demolab/ps-1.6')
ps
# ps.add('pattern-mining', predict) (When you add this for the first time)
ps.update('pattern-mining', predict)
ps.apply_changes()
# test query to make sure the model works fine
ps.query('pattern-mining', x={'Receipt': [1], 'StoreNum': [2], 'Item': ['CherryTart']})
import json
import requests
def restful_query(x):
headers = {'content-type': 'application/json'}
payload = {'api_key':'b437e588-0f2b-45e1-81c8-ce3acfa81ade', "data":{"x": x}}
end_point = 'http://demolab-one-six-2015364754.us-west-2.elb.amazonaws.com/query/pattern-mining'
return requests.post(end_point, json.dumps(payload), headers=headers).json()
restful_query({'Receipt': [1], 'StoreNum': [2], 'Item': ['CherryTart']})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can expose the trained model as a REST endpoint. This will allow other applications to consume the predictions from the model.
Step2: 2. Create a Predictive Service (One time) <a id='create'></a>
Step3: Load an already created service
Step4: Query the model <a id='query'></a>
Step5: Query from external applications via REST
|
12,555
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'ukesm1-0-mmh', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
12,556
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.5
from google.cloud import bigquery
import tensorflow as tf
import numpy as np
import shutil
print(tf.__version__)
CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key']
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
def read_dataset(filename, mode, batch_size = 512):
def decode_csv(value_column):
columns = tf.compat.v1.decode_csv(value_column, record_defaults = DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
# No need to features.pop('key') since it is not specified in the INPUT_COLUMNS.
# The key passes through the graph unused.
return features, label
# Create list of file names that match "glob" pattern (i.e. data_file_*.csv)
filenames_dataset = tf.data.Dataset.list_files(filename)
# Read lines from text files
textlines_dataset = filenames_dataset.flat_map(tf.data.TextLineDataset)
# Parse text lines as comma-separated values (CSV)
dataset = textlines_dataset.map(decode_csv)
# Note:
# use tf.data.Dataset.flat_map to apply one to many transformations (here: filename -> text lines)
# use tf.data.Dataset.map to apply one to one transformations (here: text line -> feature list)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset
INPUT_COLUMNS = [
tf.feature_column.numeric_column('pickuplon'),
tf.feature_column.numeric_column('pickuplat'),
tf.feature_column.numeric_column('dropofflat'),
tf.feature_column.numeric_column('dropofflon'),
tf.feature_column.numeric_column('passengers'),
]
def add_more_features(feats):
# Nothing to add (yet!)
return feats
feature_cols = add_more_features(INPUT_COLUMNS)
# Defines the expected shape of the JSON feed that the model
# will receive once deployed behind a REST API in production.
def serving_input_fn():
json_feature_placeholders = {
'pickuplon' : tf.compat.v1.placeholder(tf.float32, [None]),
'pickuplat' : tf.compat.v1.placeholder(tf.float32, [None]),
'dropofflat' : tf.compat.v1.placeholder(tf.float32, [None]),
'dropofflon' : tf.compat.v1.placeholder(tf.float32, [None]),
'passengers' : tf.compat.v1.placeholder(tf.float32, [None]),
}
# You can transform data here from the input format to the format expected by your model.
features = json_feature_placeholders # no transformation needed
return tf.estimator.export.ServingInputReceiver(features, json_feature_placeholders)
def train_and_evaluate(output_dir, num_train_steps):
estimator = tf.estimator.LinearRegressor(
model_dir = output_dir,
feature_columns = feature_cols)
train_spec=tf.estimator.TrainSpec(
input_fn = lambda: read_dataset('./taxi-train.csv', mode = tf.estimator.ModeKeys.TRAIN),
max_steps = num_train_steps)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec=tf.estimator.EvalSpec(
input_fn = lambda: read_dataset('./taxi-valid.csv', mode = tf.estimator.ModeKeys.EVAL),
steps = None,
start_delay_secs = 1, # start evaluating after N seconds
throttle_secs = 10, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
OUTDIR = './taxi_trained'
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
tf.compat.v1.summary.FileWriterCache.clear()
train_and_evaluate(OUTDIR, num_train_steps = 500)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Input </h2>
Step2: <h2> Create features out of input data </h2>
Step3: <h2> Serving input function </h2>
Step4: <h2> tf.estimator.train_and_evaluate </h2>
Step5: <h2>Run training</h2>
|
12,557
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
tf.enable_eager_execution()
tfe = tf.contrib.eager # Shorthand for some symbols
from math import pi
def f(x):
return tf.square(tf.sin(x))
assert f(pi/2).numpy() == 1.0
# grad_f will return a list of derivatives of f
# with respect to its arguments. Since f() has a single argument,
# grad_f will return a list with a single element.
grad_f = tfe.gradients_function(f)
assert tf.abs(grad_f(pi/2)[0]).numpy() < 1e-7
def f(x):
return tf.square(tf.sin(x))
def grad(f):
return lambda x: tfe.gradients_function(f)(x)[0]
x = tf.lin_space(-2*pi, 2*pi, 100) # 100 points between -2π and +2π
import matplotlib.pyplot as plt
plt.plot(x, f(x), label="f")
plt.plot(x, grad(f)(x), label="first derivative")
plt.plot(x, grad(grad(f))(x), label="second derivative")
plt.plot(x, grad(grad(grad(f)))(x), label="third derivative")
plt.legend()
plt.show()
def f(x, y):
output = 1
for i in range(y):
output = tf.multiply(output, x)
return output
def g(x, y):
# Return the gradient of `f` with respect to its first parameter
return tfe.gradients_function(f)(x, y)[0]
assert f(3.0, 2).numpy() == 9.0 # f(x, 2) is essentially x * x
assert g(3.0, 2).numpy() == 6.0 # And its gradient will be 2 * x
assert f(4.0, 3).numpy() == 64.0 # f(x, 3) is essentially x * x * x
assert g(4.0, 3).numpy() == 48.0 # And its gradient will be 3 * x * x
x = tf.ones((2, 2))
# TODO(b/78880779): Remove the 'persistent=True' argument and use
# a single t.gradient() call when the bug is resolved.
with tf.GradientTape(persistent=True) as t:
# TODO(ashankar): Explain with "watch" argument better?
t.watch(x)
y = tf.reduce_sum(x)
z = tf.multiply(y, y)
# Use the same tape to compute the derivative of z with respect to the
# intermediate value y.
dz_dy = t.gradient(z, y)
assert dz_dy.numpy() == 8.0
# Derivative of z with respect to the original input tensor x
dz_dx = t.gradient(z, x)
for i in [0, 1]:
for j in [0, 1]:
assert dz_dx[i][j].numpy() == 8.0
# TODO(ashankar): Should we use the persistent tape here instead? Follow up on Tom and Alex's discussion
x = tf.constant(1.0) # Convert the Python 1.0 to a Tensor object
with tf.GradientTape() as t:
with tf.GradientTape() as t2:
t2.watch(x)
y = x * x * x
# Compute the gradient inside the 't' context manager
# which means the gradient computation is differentiable as well.
dy_dx = t2.gradient(y, x)
d2y_dx2 = t.gradient(dy_dx, x)
assert dy_dx.numpy() == 3.0
assert d2y_dx2.numpy() == 6.0
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Automatic differentiation and gradient tape
Step2: Derivatives of a function
Step3: Higher-order gradients
Step4: Gradient tapes
Step5: At times it may be inconvenient to encapsulate computation of interest into a function. For example, if you want the gradient of the output with respect to intermediate values computed in the function. In such cases, the slightly more verbose but explicit tf.GradientTape context is useful. All computation inside the context of a tf.GradientTape is "recorded".
Step6: Higher-order gradients
|
12,558
|
<ASSISTANT_TASK:>
Python Code:
# we assume that we have the dynet module in your path.
# OUTDATED: we also assume that LD_LIBRARY_PATH includes a pointer to where libcnn_shared.so is.
from dynet import *
pc = ParameterCollection()
NUM_LAYERS=2
INPUT_DIM=50
HIDDEN_DIM=10
builder = LSTMBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, pc)
# or:
# builder = SimpleRNNBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, pc)
s0 = builder.initial_state()
x1 = vecInput(INPUT_DIM)
s1=s0.add_input(x1)
y1 = s1.output()
# here, we add x1 to the RNN, and the output we get from the top is y (a HIDDEN_DIM-dim vector)
y1.npvalue().shape
s2=s1.add_input(x1) # we can add another input
y2=s2.output()
print s2.h()
# create a simple rnn builder
rnnbuilder=SimpleRNNBuilder(NUM_LAYERS, INPUT_DIM, HIDDEN_DIM, pc)
# initialize a new graph, and a new sequence
rs0 = rnnbuilder.initial_state()
# add inputs
rs1 = rs0.add_input(x1)
ry1 = rs1.output()
print "all layers:", s1.h()
print s1.s()
rnn_h = rs1.h()
rnn_s = rs1.s()
print "RNN h:", rnn_h
print "RNN s:", rnn_s
lstm_h = s1.h()
lstm_s = s1.s()
print "LSTM h:", lstm_h
print "LSTM s:", lstm_s
s2=s1.add_input(x1)
s3=s2.add_input(x1)
s4=s3.add_input(x1)
# let's continue s3 with a new input.
s5=s3.add_input(x1)
# we now have two different sequences:
# s0,s1,s2,s3,s4
# s0,s1,s2,s3,s5
# the two sequences share parameters.
assert(s5.prev() == s3)
assert(s4.prev() == s3)
s6=s3.prev().add_input(x1)
# we now have an additional sequence:
# s0,s1,s2,s6
s6.h()
s6.s()
state = rnnbuilder.initial_state()
xs = [x1,x1,x1]
states = state.add_inputs(xs)
outputs = [s.output() for s in states]
hs = [s.h() for s in states]
print outputs, hs
state = rnnbuilder.initial_state()
xs = [x1,x1,x1]
outputs = state.transduce(xs)
print outputs
import random
from collections import defaultdict
from itertools import count
import sys
LAYERS = 2
INPUT_DIM = 50
HIDDEN_DIM = 50
characters = list("abcdefghijklmnopqrstuvwxyz ")
characters.append("<EOS>")
int2char = list(characters)
char2int = {c:i for i,c in enumerate(characters)}
VOCAB_SIZE = len(characters)
pc = ParameterCollection()
srnn = SimpleRNNBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, pc)
lstm = LSTMBuilder(LAYERS, INPUT_DIM, HIDDEN_DIM, pc)
params = {}
params["lookup"] = pc.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
params["R"] = pc.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
params["bias"] = pc.add_parameters((VOCAB_SIZE))
# return compute loss of RNN for one sentence
def do_one_sentence(rnn, sentence):
# setup the sentence
renew_cg()
s0 = rnn.initial_state()
R = parameter(params["R"])
bias = parameter(params["bias"])
lookup = params["lookup"]
sentence = ["<EOS>"] + list(sentence) + ["<EOS>"]
sentence = [char2int[c] for c in sentence]
s = s0
loss = []
for char,next_char in zip(sentence,sentence[1:]):
s = s.add_input(lookup[char])
probs = softmax(R*s.output() + bias)
loss.append( -log(pick(probs,next_char)) )
loss = esum(loss)
return loss
# generate from model:
def generate(rnn):
def sample(probs):
rnd = random.random()
for i,p in enumerate(probs):
rnd -= p
if rnd <= 0: break
return i
# setup the sentence
renew_cg()
s0 = rnn.initial_state()
R = parameter(params["R"])
bias = parameter(params["bias"])
lookup = params["lookup"]
s = s0.add_input(lookup[char2int["<EOS>"]])
out=[]
while True:
probs = softmax(R*s.output() + bias)
probs = probs.vec_value()
next_char = sample(probs)
out.append(int2char[next_char])
if out[-1] == "<EOS>": break
s = s.add_input(lookup[next_char])
return "".join(out[:-1]) # strip the <EOS>
# train, and generate every 5 samples
def train(rnn, sentence):
trainer = SimpleSGDTrainer(pc)
for i in xrange(200):
loss = do_one_sentence(rnn, sentence)
loss_value = loss.value()
loss.backward()
trainer.update()
if i % 5 == 0:
print loss_value,
print generate(rnn)
sentence = "a quick brown fox jumped over the lazy dog"
train(srnn, sentence)
sentence = "a quick brown fox jumped over the lazy dog"
train(lstm, sentence)
train(srnn, "these pretzels are making me thirsty")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An LSTM/RNN overview
Step2: Note that when we create the builder, it adds the internal RNN parameters to the ParameterCollection.
Step3: If our LSTM/RNN was one layer deep, y2 would be equal to the hidden state. However, since it is 2 layers deep, y2 is only the hidden state (= output) of the last layer.
Step4: The same interface that we saw until now for the LSTM, holds also for the Simple RNN
Step5: To summarize, when calling .add_input(x) on an RNNState, the state creates a new RNN/LSTM column, passing it the previous state together with the input x
Step6: As we can see, the LSTM has two extra state expressions (one for each hidden layer) before the outputs h.
Step7: Aside
Step8: This is convenient.
Step9: Character-level LSTM
Step10: Notice that
Step11: The model seem to learn the sentence quite well.
|
12,559
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -q -U tf-nightly
import tensorflow as tf
tf.enable_eager_execution()
A = tf.constant([[2.0, 0.0], [0.0, 3.0]])
import numpy as np
print("Tensors are backed by NumPy arrays, which are accessible through their "
"`.numpy()` method:\n", A)
assert(type(A.numpy()) == np.ndarray)
print("\nOperations (like `tf.matmul(A, A)`) execute "
"immediately (no more Sessions!):\n", tf.matmul(A, A))
print("\nTensors behave like NumPy arrays: you can iterate over them and "
"supply them as inputs to most functions that expect NumPy arrays:")
for i, row in enumerate(A):
for j, entry in enumerate(row):
print("A[%d, %d]^2 == %d" % (i, j, np.square(entry)))
import tensorflow.contrib.eager as tfe
w = tfe.Variable(3.0)
with tf.GradientTape() as tape:
loss = w ** 2
dw, = tape.gradient(loss, [w])
print("\nYou can use `tf.GradientTape` to compute the gradient of a "
"computation with respect to a list of `tf.contrib.eager.Variable`s;\n"
"for example, `tape.gradient(loss, [w])`, where `loss` = w ** 2 and "
"`w` == 3.0, yields`", dw,"`.")
if tf.test.is_gpu_available():
with tf.device(tf.test.gpu_device_name()):
B = tf.constant([[2.0, 0.0], [0.0, 3.0]])
print(tf.matmul(B, B))
import matplotlib.pyplot as plt
def gen_regression_data(num_examples=1000, p=0.2):
X = tf.random_uniform(shape=(num_examples,), maxval=50)
w_star = tf.random_uniform(shape=(), maxval=10)
b_star = tf.random_uniform(shape=(), maxval=10)
noise = tf.random_normal(shape=(num_examples,), mean=0.0, stddev=10.0)
# With probability 1 - p, y := y * -1.
sign = 2 * np.random.binomial(1, 1 - p, size=(num_examples,)) - 1
# You can freely mix Tensors and NumPy arrays in your computations:
# `sign` is a NumPy array, but the other symbols below are Tensors.
Y = sign * (w_star * X + b_star + noise)
return X, Y
X, Y = gen_regression_data()
plt.plot(X, Y, "go") # You can plot Tensors!
plt.title("Observed data")
plt.show()
def huber_loss(y, y_hat, m=1.0):
# Enabling eager execution lets you use Python control flow.
delta = tf.abs(y - y_hat)
return delta ** 2 if delta <= m else m * (2 * delta - m)
import time
from google.colab import widgets
import tensorflow.contrib.eager as tfe # Needed to create tfe.Variable objects.
class Regression(object):
def __init__(self, loss_fn):
super(Regression, self).__init__()
self.w = tfe.Variable(0.0)
self.b = tfe.Variable(0.0)
self.variables = [self.w, self.b]
self.loss_fn = loss_fn
def predict(self, x):
return x * self.w + self.b
def regress(model, optimizer, dataset, epochs=5, log_every=1, num_examples=1000):
plot = log_every is not None
if plot:
# Colab provides several widgets for interactive visualization.
tb = widgets.TabBar([str(i) for i in range(epochs) if i % log_every == 0])
X, Y = dataset.batch(num_examples).make_one_shot_iterator().get_next()
X = tf.reshape(X, (num_examples,))
Y = tf.reshape(Y, (num_examples,))
for epoch in range(epochs):
iterator = dataset.make_one_shot_iterator()
epoch_loss = 0.0
start = time.time()
for x_i, y_i in iterator:
batch_loss_fn = lambda: model.loss_fn(y_i, model.predict(x_i))
optimizer.minimize(batch_loss_fn, var_list=model.variables)
epoch_loss += batch_loss_fn()
duration = time.time() - start
if plot and epoch % log_every == 0:
with tb.output_to(str(epoch)):
print("Epoch %d took %0.2f seconds, resulting in a loss of %0.4f." % (
epoch, duration, epoch_loss))
plt.plot(X, Y, "go", label="data")
plt.plot(X, model.predict(X), "b", label="regression")
plt.legend()
huber_regression = Regression(huber_loss)
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
regress(huber_regression,
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.0001),
dataset=dataset)
import pdb
def buggy_loss(y, y_hat):
pdb.set_trace()
huber_loss(y, y_hat)
print("Type 'exit' to stop the debugger, or 's' to step into `huber_loss` and "
"'n' to step through it.")
try:
buggy_loss(1.0, 2.0)
except:
pass
import cProfile
import pstats
huber_regression = Regression(huber_loss)
cProfile.run(
"regress(model=huber_regression, "
"optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.001), "
"dataset=dataset, log_every=None)", "prof")
pstats.Stats("prof").strip_dirs().sort_stats("cumulative").print_stats(10)
print("Most of the time is spent during backpropagation and binary operations.")
import os
import six
from six.moves import urllib
def parse(line):
Parse a line from the colors dataset.
# `items` is a list [color_name, r, g, b].
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
path = tf.keras.utils.get_file(os.path.basename(url), url, cache_dir=data_dir)
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(batch_size,
padded_shapes=([None], [None, None], []))
return dataset, path
train_url = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv"
test_url = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv"
data_dir = "/tmp/rnn/data"
train_data, train_path = load_dataset(data_dir, train_url, batch_size=64)
eval_data, _ = load_dataset(data_dir, test_url, batch_size=64)
import pandas
pandas.read_csv(train_path).head(10)
colors, one_hot_chars, lengths = tfe.Iterator(train_data).next()
colors[:10].numpy()
class RNNColorbot(tf.keras.Model):
"""Multi-layer RNN that predicts RGB tuples given color names."""
def __init__(self):
super(RNNColorbot, self).__init__()
self.keep_prob = 0.5
self.lower_cell = tf.contrib.rnn.LSTMBlockCell(256)
self.upper_cell = tf.contrib.rnn.LSTMBlockCell(128)
self.relu = tf.layers.Dense(3, activation=tf.nn.relu, name="relu")
def call(self, inputs, training=False):
"""Generates RGB tuples from `inputs`, a tuple (`chars`, `sequence_length`)."""
(chars, sequence_length) = inputs
chars = tf.transpose(chars, [1, 0, 2]) # make `chars` time-major
batch_size = int(chars.shape[1])
for cell in [self.lower_cell, self.upper_cell]:
outputs = []
state = cell.zero_state(batch_size, tf.float32)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
chars = outputs
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
def loss_fn(labels, predictions):
return tf.reduce_mean((predictions - labels) ** 2)
def train_one_epoch(model, optimizer, train_data, log_every=10):
iterator = tfe.Iterator(train_data)
for batch,(labels, chars, sequence_length) in enumerate(iterator):
with tf.GradientTape() as tape:
predictions = model((chars, sequence_length), training=True)
loss = loss_fn(labels, predictions)
variables = model.variables
grad = tape.gradient(loss, variables)
optimizer.apply_gradients([(g, v) for g, v in zip(grad, variables)])
if log_every and batch % log_every == 0:
print("train/batch #%d\tloss: %.6f" % (batch, loss))
batch += 1
def test(model, eval_data):
total_loss = 0.0
iterator = eval_data.make_one_shot_iterator()
for labels, chars, sequence_length in tfe.Iterator(eval_data):
predictions = model((chars, sequence_length), training=False)
total_loss += loss_fn(labels, predictions)
print("eval/loss: %.6f\n" % total_loss)
model = RNNColorbot()
optimizer = tf.train.AdamOptimizer(learning_rate=.01)
# Create a `Checkpoint` for saving and restoring state; the keywords
# supplied `Checkpoint`'s constructor are the names of the objects to be saved
# and restored, and their corresponding values are the actual objects. Note
# that we're saving `optimizer` in addition to `model`, since `AdamOptimizer`
# maintains state.
import tensorflow.contrib.eager as tfe
checkpoint = tfe.Checkpoint(model=model, optimizer=optimizer)
checkpoint_prefix = "/tmp/rnn/ckpt"
# The next line loads the most recent checkpoint, if any.
checkpoint.restore(tf.train.latest_checkpoint("/tmp/rnn"))
for epoch in range(4):
train_one_epoch(model, optimizer, train_data)
test(model, eval_data)
checkpoint.save(checkpoint_prefix)
print("Colorbot is ready to generate colors!")
tb = widgets.TabBar(["RNN Colorbot"])
while True:
with tb.output_to(0):
try:
color_name = six.moves.input(
"Give me a color name (or press 'enter' to exit): ")
except (EOFError, KeyboardInterrupt):
break
if not color_name:
break
_, chars, length = parse(color_name)
preds, = model((np.expand_dims(chars, 0), np.expand_dims(length, 0)),
training=False)
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
with tb.output_to(0):
tb.clear_tab()
print("Predicted RGB tuple:", rgb)
plt.imshow([[clipped_preds]])
plt.title(color_name)
plt.show()
# Our original implementation of `huber_loss` is not compatible with non-scalar
# data. Your task is to fix that. For your convenience, the original
# implementation is reproduced below.
#
# def huber_loss(y, y_hat, m=1.0):
# delta = tf.abs(y - y_hat)
# return delta ** 2 if delta <= m else m * (2 * delta - m)
#
def batched_huber_loss(y, y_hat, m=1.0):
# TODO: Uncomment out the below code and replace `...` with your solution.
# Hint: Tensors are immutable.
# Hint: `tf.where` might be useful.
delta = tf.abs(y - y_hat)
# ...
# ...
# return ...
regression = Regression(batched_huber_loss)
num_epochs = 4
batch_sizes = [1, 10, 20, 100, 200, 500, 1000]
times = []
X, Y = gen_regression_data(num_examples=1000)
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001)
for size in batch_sizes:
batched_dataset = dataset.batch(size)
start = time.time()
regress(model=regression, optimizer=optimizer, dataset=batched_dataset,
epochs=num_epochs, log_every=None)
end = time.time()
times.append((end - start) / num_epochs)
regression.w.assign(0.0)
regression.b.assign(0.0)
plt.figure()
plt.plot(batch_sizes, times, "bo")
plt.xlabel("batch size")
plt.ylabel("time (seconds)")
plt.semilogx()
plt.semilogy()
plt.title("Time per Epoch vs. Batch Size")
plt.show()
def batched_huber_loss(y, y_hat, m=1.0):
delta = tf.abs(y - y_hat)
quadratic = delta ** 2
linear = m * (2 * delta - m)
return tf.reduce_mean(tf.where(delta <= m, quadratic, linear))
regression = Regression(batched_huber_loss)
num_epochs = 4
batch_sizes = [2, 10, 20, 100, 200, 500, 1000]
times = []
X, Y = gen_regression_data(num_examples=1000)
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001)
for size in batch_sizes:
batched_dataset = dataset.batch(size)
start = time.time()
regress(model=regression, optimizer=optimizer, dataset=batched_dataset,
epochs=num_epochs, log_every=None)
end = time.time()
times.append((end - start) / num_epochs)
regression.w.assign(0.0)
regression.b.assign(0.0)
plt.figure()
plt.plot(batch_sizes, times, "bo")
plt.xlabel("batch size")
plt.ylabel("time (seconds)")
plt.semilogx()
plt.semilogy()
plt.title("Time per Epoch vs. Batch Size")
plt.show()
class BuggyModel(tf.keras.Model):
def __init__(self):
super(BuggyModel, self).__init__()
self._input_shape = [-1, 28, 28, 1]
self.conv = tf.layers.Conv2D(filters=32, kernel_size=5, padding="same",
data_format="channels_last")
self.fc = tf.layers.Dense(10)
self.max_pool2d = tf.layers.MaxPooling2D(
(2, 2), (2, 2), padding="same", data_format="channels_last")
def call(self, inputs):
y = inputs
y = self.conv(y)
y = self.max_pool2d(y)
return self.fc(y)
buggy_model = BuggyModel()
inputs = tf.random_normal(shape=(100, 28, 28))
outputs = buggy_model(inputs)
assert outputs.shape == (100, 10), "invalid output shape: %s" % outputs.shape
class BuggyModel(tf.keras.Model):
def __init__(self):
super(BuggyModel, self).__init__()
self._input_shape = [-1, 28, 28, 1]
self.conv = tf.layers.Conv2D(filters=32, kernel_size=5, padding="same",
data_format="channels_last")
self.fc = tf.layers.Dense(10)
self.max_pool2d = tf.layers.MaxPooling2D(
(2, 2), (2, 2), padding="same", data_format="channels_last")
def call(self, inputs):
y = tf.reshape(inputs, self._input_shape)
y = self.conv(y)
y = self.max_pool2d(y)
y = tf.layers.flatten(y)
return self.fc(y)
buggy_model = BuggyModel()
inputs = tf.random_normal(shape=(100, 28, 28))
outputs = buggy_model(inputs)
assert outputs.shape == (100, 10), "invalid output shape: %s" % outputs.shape
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Eager execution
Step2: 2. A NumPy-like library for numerical computation and machine learning
Step3: Tensors behave similarly to NumPy arrays, but they don't behave exactly the same
Step4: Variables and Gradients
Step5: GPU usage
Step6: Fitting a Huber regression
Step7: Huber loss
Step8: A simple class for regressions
Step9: Run the following cell to fit the model! Note that enabling eager execution makes it
Step10: Debugging and profiling
Step11: Leverage the Python profiler to dig into the relative costs of training your model.
Step14: 3. Neural networks
Step17: Defining and training a neural network
Step18: The next cell trains our RNNColorbot, restoring and saving checkpoints of the learned variables along the way. Thanks to checkpointing, every run of the below cell will resume training from wherever the previous run left off. For more on checkpointing, take a look at our user guide.
Step19: Paint me a color, Colorbot!
Step20: 4. Exercises
Step21: Solution
Step22: Exercise 2
Step23: Solution
|
12,560
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
masses = np.arange(0.1, 0.96, 0.05) # list of masses
from scipy.interpolate import interp1d
ages = np.arange(5.0e6, 3.1e7, 1.0e6) # ages requested
# open output file objects
output_files = [open('files/dmestar_{:07.1f}myr_gas07_z+0.00_a+0.00_mag25kG.iso'.format(age/1.0e6), 'w')
for age in ages]
trk_directory = '../../evolve/dmestar/trk/gas07/p000/a0/amlt2040/mag25kG'
for mass in masses:
trk_filename = 'm{:04.0f}_GAS07_p000_p0_y26_mlt2.040_mag25kG.trk'.format(mass*1000.)
try:
gas07_trk = np.genfromtxt('{0}/{1}'.format(trk_directory, trk_filename), usecols=(0, 1, 2, 3, 4, 8))
except IOError:
continue
# extract only relevant age chunk for easier interpolation
gas07_trk = np.array([time_step for time_step in gas07_trk if 1.0e6 <= time_step[0] <= 5.0e7])
# generate linear interpolation curve as a function of age
try:
icurve = interp1d(gas07_trk[:, 0], gas07_trk[:, 1:], kind='linear', axis=0)
except IndexError:
continue
# extract properties at the requested age
trk_props = icurve(ages)
i = 0
for props in trk_props:
s = '{:6.3f}'.format(mass)
for prop in props:
if np.isnan(prop) or prop < -12.0:
prop = -12.0
s += '{:14.6f}'.format(prop)
s += '\n'
output_files[i].write(s)
i += 1
#print "{:4.2f} Mo Track Processed.".format(mass)
# close output files
for f in output_files:
f.close()
fine_mass_grid = np.arange(0.1, 0.95, 0.02)
for age in ages:
iso_filename = 'files/dmestar_{:07.1f}myr_gas07_z+0.00_a+0.00_mag25kG.iso'.format(age/1.0e6)
isochrone = np.genfromtxt(iso_filename)
# generate interpolation curve
icurve = interp1d(isochrone[:,0], isochrone[:,1:], axis=0, kind='slinear')
# interpolate onto a finer mass grid
fine_isochrone = icurve(fine_mass_grid)
fine_isochrone = np.column_stack((fine_mass_grid, fine_isochrone))
# write header
header = 'Dartmouth Stellar Evolution Model: Quick Isochrone \n\n'
header += 'Age = {:7.1f} Myr [Fe/H] = {:+5.2f} [a/Fe] = {:+5.2f} \n\n'.format(age/1.e6, 0.0, 0.0)
header += '{:^14} {:^14} {:^14} {:^14} {:^14} {:^14}'.format('Mass', 'log(Teff)', 'log(g)', 'log(L/Lo)',
'log(R/Ro)', 'A(Li)')
# overwrite original file
np.savetxt(iso_filename, fine_isochrone, fmt='%14.6f', header=header)
masses = np.arange(0.10, 0.86, 0.05) # higher masses did not converge (investigating)
# open output file objects
output_files = [open('files/dmestar_{:07.1f}myr_gs98_z+0.00_a+0.00_mag25kG.iso'.format(age/1.0e6), 'w')
for age in ages]
trk_directory = '../../evolve/dmestar/trk/gs98/p000/a0/amlt1884/mag25kG'
for mass in masses:
trk_filename = 'm{:04.0f}_GS98_p000_p0_y28_mlt1.884_mag25kG.trk'.format(mass*1000.)
try:
gs98_trk = np.genfromtxt('{0}/{1}'.format(trk_directory, trk_filename), usecols=(0, 1, 2, 3, 4, 8))
except IOError:
continue
# extract only relevant age chunk for easier interpolation
gs98_trk = np.array([time_step for time_step in gs98_trk if 1.0e6 <= time_step[0] <= 5.0e7])
# generate linear interpolation curve as a function of age
try:
icurve = interp1d(gs98_trk[:, 0], gs98_trk[:, 1:], kind='linear', axis=0)
except IndexError:
continue
# extract properties at the requested age
trk_props = icurve(ages)
i = 0
for props in trk_props:
s = '{:6.3f}'.format(mass)
for prop in props:
if np.isnan(prop) or prop < -12.0:
prop = -12.0
s += '{:14.6f}'.format(prop)
s += '\n'
output_files[i].write(s)
i += 1
#print "{:4.2f} Mo Track Processed.".format(mass)
# close output files
for f in output_files:
f.close()
fine_mass_grid = np.arange(0.1, 0.85, 0.02)
for age in ages:
iso_filename = 'files/dmestar_{:07.1f}myr_gs98_z+0.00_a+0.00_mag25kG.iso'.format(age/1.0e6)
isochrone = np.genfromtxt(iso_filename)
# generate interpolation curves
icurve = interp1d(isochrone[:,0], isochrone[:,1:], axis=0, kind='slinear')
# interpolate onto a finer mass grid
fine_isochrone = icurve(fine_mass_grid)
fine_isochrone = np.column_stack((fine_mass_grid, fine_isochrone))
# write header
header = 'Dartmouth Stellar Evolution Model: Quick Isochrone \n\n'
header += 'Age = {:7.1f} Myr [Fe/H] = {:+5.2f} [a/Fe] = {:+5.2f} \n\n'.format(age/1.e6, 0.0, 0.0)
header += '{:^14} {:^14} {:^14} {:^14} {:^14} {:^14}'.format('Mass', 'log(Teff)', 'log(g)', 'log(L/Lo)',
'log(R/Ro)', 'A(Li)')
# overwrite original file
np.savetxt(iso_filename, fine_isochrone, fmt='%14.6f', header=header)
# GS98 isochrones
gs98_05 = np.genfromtxt('files/dmestar_00005.0myr_gs98_z+0.00_a+0.00_mag25kG.iso')
gs98_12 = np.genfromtxt('files/dmestar_00012.0myr_gs98_z+0.00_a+0.00_mag25kG.iso')
gs98_30 = np.genfromtxt('files/dmestar_00030.0myr_gs98_z+0.00_a+0.00_mag25kG.iso')
# GAS07 isochrones
gas07_05 = np.genfromtxt('files/dmestar_00005.0myr_gas07_z+0.00_a+0.00_mag25kG.iso')
gas07_12 = np.genfromtxt('files/dmestar_00012.0myr_gas07_z+0.00_a+0.00_mag25kG.iso')
gas07_30 = np.genfromtxt('files/dmestar_00030.0myr_gas07_z+0.00_a+0.00_mag25kG.iso')
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].set_title('GAS07 Series', fontsize=22.)
ax[1].set_title('GS98 Series', fontsize=22.)
for axis in ax:
axis.set_xlabel('Effective Temperature (K)', fontsize=20.)
axis.set_ylabel('$\\log (L / L_{\\odot})$', fontsize=20.)
axis.set_xlim(4500., 2500.)
axis.set_ylim(-2.5, 0.0)
axis.tick_params(which='major', axis='both', length=10., labelsize=16.)
# GAS07 series
ax[0].plot(10.0**gas07_05[:, 1], gas07_05[:, 3], '-', lw=2, color='#1e90ff')
ax[0].plot(10.0**gas07_12[:, 1], gas07_12[:, 3], '--', lw=2, color='#1e90ff')
ax[0].plot(10.0**gas07_30[:, 1], gas07_30[:, 3], '-.', lw=2, color='#1e90ff')
# GS98 series
ax[1].plot(10.0**gs98_05[:, 1], gs98_05[:, 3], '-', lw=2, color='#1e90ff')
ax[1].plot(10.0**gs98_12[:, 1], gs98_12[:, 3], '--', lw=2, color='#1e90ff')
ax[1].plot(10.0**gs98_30[:, 1], gs98_30[:, 3], '-.', lw=2, color='#1e90ff')
fig.tight_layout()
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].set_title('GAS07 Series', fontsize=22.)
ax[1].set_title('GS98 Series', fontsize=22.)
for axis in ax:
axis.set_xlabel('Effective Temperature (K)', fontsize=20.)
axis.set_ylabel('A(Li)', fontsize=20.)
axis.set_xlim(4500., 2500.)
axis.set_ylim(2.5, 3.5)
axis.tick_params(which='major', axis='both', length=10., labelsize=16.)
axis.plot([4500., 2500.], [3.30, 3.30], '--', lw=1, color="#555555")
# GAS07 series
ax[0].plot(10.0**gas07_05[:, 1], gas07_05[:, 5], '-', lw=2, color='#1e90ff')
ax[0].plot(10.0**gas07_12[:, 1], gas07_12[:, 5], '--', lw=2, color='#1e90ff')
ax[0].plot(10.0**gas07_30[:, 1], gas07_30[:, 5], '-.', lw=2, color='#1e90ff')
# GS98 series
ax[1].plot(10.0**gs98_05[:, 1], gs98_05[:, 5], '-', lw=2, color='#1e90ff')
ax[1].plot(10.0**gs98_12[:, 1], gs98_12[:, 5], '--', lw=2, color='#1e90ff')
ax[1].plot(10.0**gs98_30[:, 1], gs98_30[:, 5], '-.', lw=2, color='#1e90ff')
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Magnetic Mass Tracks
Step2: Magnetic Isochrones
Step3: Dartmouth & MARCS; Solar abundance
Step4: Interpolate isochrones onto a finer mass grid.
Step5: Magnetic isochrones are stored in the directory files/ and follow the format outlined in the two code snippets above. We can take a quick look at some of the properties of these isochrones and how they compare to standard stellar evolution isochrones (i.e., without a magnetic perturbation).
Step6: Interpolate onto a finer mass grid,
Step7: Magnetic isochrones are stored in the directory files/ and follow the format outlined in the two code snippets above. We can take a quick look at some of the properties of these isochrones and how they compare to standard stellar evolution isochrones (i.e., without a magnetic perturbation).
Step8: There looks to be some noise in the GS98 isochrones at the highest temperatures, which is likely related to the convergence issues with those above $0.90 M_{\odot}$. Nevertheless, the isochrones appear quite smooth.
|
12,561
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-mh', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
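# Example only (hypothetical values, not this model's actual configuration):
# a multi-valued property takes one DOC.set_value call per entry, e.g.
# DOC.set_value("Sea ice temperature")
# DOC.set_value("Sea ice thickness")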
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
12,562
|
<ASSISTANT_TASK:>
Python Code:
import ovation.lab.constants as constants
import ovation.lab.results as results
import ovation.download as download
from ovation.lab.session import connect
s = connect(input("Email: "), api=constants.LAB_STAGING_HOST) # use constants.LAB_PRODUCTION_HOST for production
batch = input("Batch (workflow ID): ")
fastq_results = results.get_sample_results(s, result_type='fastq', workflow_id=int(batch))
urls = results.get_file_urls(s, fastq_results)
download.download_urls([d['url'] for d in urls])
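# Optional sketch: each entry in `urls` also carries an 'etag' (a version tag for
# the file). A hypothetical cache of previously seen etags lets a client skip
# unchanged files; here it is seeded with the current etags, so nothing is re-fetched.
known_etags = {d['etag'] for d in urls}
fresh = [d['url'] for d in urls if d['etag'] not in known_etags]
download.download_urls(fresh)  # no-op in this sketch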
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: API Key
Step2: Find fastq results for samples in batch
Step3: results.get_sample_results pulls all WorkflowSampleResults for the given batch and result type. In the workflow, Ovation associated each file with its corresponding sample, and created a WorkflowSampleResult that referenced the uploaded file.
Step4: results.get_file_urls retrieves temporary, pre-signed download URLs for each file Resource described by the WorkflowSampleResults.
Step5: urls is a list of dictionaries that have a "url" attribute and an "etag" attribute. You can use the pre-signed "url" to read/download the file. The etag is like a "version" of the file. If the file has changed, the etag will change too.
|
12,563
|
<ASSISTANT_TASK:>
Python Code:
%%R
library(dplyr)
playoffs <- data_frame(
holiday = 'playoff',
ds = as.Date(c('2008-01-13', '2009-01-03', '2010-01-16',
'2010-01-24', '2010-02-07', '2011-01-08',
'2013-01-12', '2014-01-12', '2014-01-19',
'2014-02-02', '2015-01-11', '2016-01-17',
'2016-01-24', '2016-02-07')),
lower_window = 0,
upper_window = 1
)
superbowls <- data_frame(
holiday = 'superbowl',
ds = as.Date(c('2010-02-07', '2014-02-02', '2016-02-07')),
lower_window = 0,
upper_window = 1
)
holidays <- bind_rows(playoffs, superbowls)
playoffs = pd.DataFrame({
'holiday': 'playoff',
'ds': pd.to_datetime(['2008-01-13', '2009-01-03', '2010-01-16',
'2010-01-24', '2010-02-07', '2011-01-08',
'2013-01-12', '2014-01-12', '2014-01-19',
'2014-02-02', '2015-01-11', '2016-01-17',
'2016-01-24', '2016-02-07']),
'lower_window': 0,
'upper_window': 1,
})
superbowls = pd.DataFrame({
'holiday': 'superbowl',
'ds': pd.to_datetime(['2010-02-07', '2014-02-02', '2016-02-07']),
'lower_window': 0,
'upper_window': 1,
})
holidays = pd.concat((playoffs, superbowls))
%%R
m <- prophet(df, holidays = holidays)
forecast <- predict(m, future)
m = Prophet(holidays=holidays)
forecast = m.fit(df).predict(future)
%%R
forecast %>%
select(ds, playoff, superbowl) %>%
filter(abs(playoff + superbowl) > 0) %>%
tail(10)
forecast[(forecast['playoff'] + forecast['superbowl']).abs() > 0][
['ds', 'playoff', 'superbowl']][-10:]
%%R -w 9 -h 12 -u in
prophet_plot_components(m, forecast)
fig = m.plot_components(forecast)
%%R
m <- prophet(holidays = holidays)
m <- add_country_holidays(m, country_name = 'US')
m <- fit.prophet(m, df)
m = Prophet(holidays=holidays)
m.add_country_holidays(country_name='US')
m.fit(df)
%%R
m$train.holiday.names
m.train_holiday_names
%%R -w 9 -h 12 -u in
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
forecast = m.predict(future)
fig = m.plot_components(forecast)
%%R -w 9 -h 3 -u in
m <- prophet(df)
prophet:::plot_yearly(m)
from prophet.plot import plot_yearly
m = Prophet().fit(df)
a = plot_yearly(m)
%%R -w 9 -h 3 -u in
m <- prophet(df, yearly.seasonality = 20)
prophet:::plot_yearly(m)
from prophet.plot import plot_yearly
m = Prophet(yearly_seasonality=20).fit(df)
a = plot_yearly(m)
%%R -w 9 -h 9 -u in
m <- prophet(weekly.seasonality=FALSE)
m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
m <- fit.prophet(m, df)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
m = Prophet(weekly_seasonality=False)
m.add_seasonality(name='monthly', period=30.5, fourier_order=5)
forecast = m.fit(df).predict(future)
fig = m.plot_components(forecast)
%%R
is_nfl_season <- function(ds) {
dates <- as.Date(ds)
month <- as.numeric(format(dates, '%m'))
return(month > 8 | month < 2)
}
df$on_season <- is_nfl_season(df$ds)
df$off_season <- !is_nfl_season(df$ds)
def is_nfl_season(ds):
date = pd.to_datetime(ds)
return (date.month > 8 or date.month < 2)
df['on_season'] = df['ds'].apply(is_nfl_season)
df['off_season'] = ~df['ds'].apply(is_nfl_season)
%%R -w 9 -h 12 -u in
m <- prophet(weekly.seasonality=FALSE)
m <- add_seasonality(m, name='weekly_on_season', period=7, fourier.order=3, condition.name='on_season')
m <- add_seasonality(m, name='weekly_off_season', period=7, fourier.order=3, condition.name='off_season')
m <- fit.prophet(m, df)
future$on_season <- is_nfl_season(future$ds)
future$off_season <- !is_nfl_season(future$ds)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
m = Prophet(weekly_seasonality=False)
m.add_seasonality(name='weekly_on_season', period=7, fourier_order=3, condition_name='on_season')
m.add_seasonality(name='weekly_off_season', period=7, fourier_order=3, condition_name='off_season')
future['on_season'] = future['ds'].apply(is_nfl_season)
future['off_season'] = ~future['ds'].apply(is_nfl_season)
forecast = m.fit(df).predict(future)
fig = m.plot_components(forecast)
%%R
m <- prophet(df, holidays = holidays, holidays.prior.scale = 0.05)
forecast <- predict(m, future)
forecast %>%
select(ds, playoff, superbowl) %>%
filter(abs(playoff + superbowl) > 0) %>%
tail(10)
m = Prophet(holidays=holidays, holidays_prior_scale=0.05).fit(df)
forecast = m.predict(future)
forecast[(forecast['playoff'] + forecast['superbowl']).abs() > 0][
['ds', 'playoff', 'superbowl']][-10:]
%%R
m <- prophet()
m <- add_seasonality(
m, name='weekly', period=7, fourier.order=3, prior.scale=0.1)
m = Prophet()
m.add_seasonality(
name='weekly', period=7, fourier_order=3, prior_scale=0.1)
%%R -w 9 -h 12 -u in
nfl_sunday <- function(ds) {
dates <- as.Date(ds)
month <- as.numeric(format(dates, '%m'))
as.numeric((weekdays(dates) == "Sunday") & (month > 8 | month < 2))
}
df$nfl_sunday <- nfl_sunday(df$ds)
m <- prophet()
m <- add_regressor(m, 'nfl_sunday')
m <- fit.prophet(m, df)
future$nfl_sunday <- nfl_sunday(future$ds)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
def nfl_sunday(ds):
date = pd.to_datetime(ds)
if date.weekday() == 6 and (date.month > 8 or date.month < 2):
return 1
else:
return 0
df['nfl_sunday'] = df['ds'].apply(nfl_sunday)
m = Prophet()
m.add_regressor('nfl_sunday')
m.fit(df)
future['nfl_sunday'] = future['ds'].apply(nfl_sunday)
forecast = m.predict(future)
fig = m.plot_components(forecast)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Above we have included the superbowl days as both playoff games and superbowl games. This means that the superbowl effect will be an additional additive bonus on top of the playoff effect.
Step2: The holiday effect can be seen in the forecast dataframe
Step3: The holiday effects will also show up in the components plot, where we see that there is a spike on the days around playoff appearances, with an especially large spike for the superbowl
Step4: Individual holidays can be plotted using the plot_forecast_component function (imported from prophet.plot in Python) like plot_forecast_component(m, forecast, 'superbowl') to plot just the superbowl holiday component.
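For example, in Python (a minimal sketch reusing the model and forecast from above):
from prophet.plot import plot_forecast_component
plot_forecast_component(m, forecast, 'superbowl')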
Step5: You can see which holidays were included by looking at the train_holiday_names (Python) or train.holiday.names (R) attribute of the model
Step6: The holidays for each country are provided by the holidays package in Python. A list of available countries, and the country name to use, is available on their page
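A quick way to inspect them (a sketch; assumes the holidays package is installed):
import holidays
us_holidays = holidays.US(years=2016)
sorted(us_holidays.items())[:3]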
Step7: Fourier Order for Seasonalities
Step8: The default values are often appropriate, but they can be increased when the seasonality needs to fit higher-frequency changes, and generally be less smooth. The Fourier order can be specified for each built-in seasonality when instantiating the model, here it is increased to 20
Step9: Increasing the number of Fourier terms allows the seasonality to fit faster changing cycles, but can also lead to overfitting
Step10: Seasonalities that depend on other factors
Step11: Then we disable the built-in weekly seasonality, and replace it with two weekly seasonalities that have these columns specified as a condition. This means that the seasonality will only be applied to dates where the condition_name column is True. We must also add the column to the future dataframe for which we are making predictions.
Step12: Both of the seasonalities now show up in the components plots above. We can see that during the on-season when games are played every Sunday, there are large increases on Sunday and Monday that are completely absent during the off-season.
Step13: The magnitude of the holiday effect has been reduced compared to before, especially for superbowls, which had the fewest observations. There is a parameter seasonality_prior_scale which similarly adjusts the extent to which the seasonality model will fit the data.
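A minimal sketch of adjusting it (reusing the same df as above):
m = Prophet(seasonality_prior_scale=0.05).fit(df)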
Step14: Additional regressors
|
12,564
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -q git+https://github.com/quantumlib/ReCirq/
import recirq
import recirq.quantum_chess.ascii_board as ab
b = ab.AsciiBoard()
b.reset()
print(b)
from recirq.quantum_chess.move import Move
from recirq.quantum_chess.enums import MoveType, MoveVariant
m = Move(
source="b1",
target="a3",
target2="c3",
move_type=MoveType.SPLIT_JUMP,
move_variant=MoveVariant.BASIC,
)
b.reset()
r = b.apply(m)
print(b)
from recirq.quantum_chess.quantum_board import CirqBoard
from recirq.quantum_chess.bit_utils import bit_to_square, xy_to_bit
from recirq.quantum_chess.move import to_rank
global_board = CirqBoard(1)
def print_game(board):
board.print_debug_log()
print("\n")
print(board)
print("\n\n")
probs = global_board.get_probability_distribution()
print_game(global_board)
def init(board, init_basis_state):
board.with_state(init_basis_state)
probs = board.get_probability_distribution()
print_game(board)
return {
"probabilities": probs,
"empty": board.get_empty_squares_bitboard(),
"full": board.get_full_squares_bitboard(),
}
r = init(global_board, 0xFFFF00000000FFFF)
from recirq.quantum_chess.move import Move
from recirq.quantum_chess.enums import MoveType, MoveVariant
# Helper function for creating a split move from json values
def get_split_move(move_json):
return Move(
move_json["square1"],
move_json["square2"],
target2=move_json["square3"],
move_type=MoveType(move_json["type"]),
move_variant=MoveVariant(move_json["variant"]),
)
# Helper function for creating a merge move from json values
def get_merge_move(move_json):
return Move(
move_json["square1"],
move_json["square3"],
source2=move_json["square2"],
move_type=MoveType(move_json["type"]),
move_variant=MoveVariant(move_json["variant"]),
)
# Helper function for creating a standard move from json values
def get_standard_move(move_json):
return Move(
move_json["square1"],
move_json["square2"],
move_type=MoveType(move_json["type"]),
move_variant=MoveVariant(move_json["variant"]),
)
def do_move(board, move):
board.clear_debug_log()
r = board.do_move(move)
probs = board.get_probability_distribution()
print_game(board)
return {
"result": r,
"probabilities": probs,
"empty": board.get_empty_squares_bitboard(),
"full": board.get_full_squares_bitboard(),
}
move_json = {
"square1": "b1",
"square2": "a3",
"square3": "c3",
"type": MoveType.SPLIT_JUMP,
"variant": MoveVariant.BASIC,
}
split_b1_a3_c3 = get_split_move(move_json)
r = init(global_board, 0xFFFF00000000FFFF)
r = do_move(global_board, split_b1_a3_c3)
from recirq.quantum_chess.enums import ErrorMitigation
from cirq import DensityMatrixSimulator, google
from cirq.contrib.noise_models import DepolarizingNoiseModel
NOISY_SAMPLER = DensityMatrixSimulator(
noise=DepolarizingNoiseModel(depol_prob=0.004)
)
noisy_board = CirqBoard(
0,
sampler=NOISY_SAMPLER,
device=google.Sycamore,
error_mitigation=ErrorMitigation.Correct,
noise_mitigation=0.05,
)
r = init(noisy_board, 0xFFFF00000000FFFF)
r = do_move(noisy_board, split_b1_a3_c3)
def undo_last_move(board):
board.clear_debug_log()
r = board.undo_last_move()
probs = board.get_probability_distribution()
print_game(board)
return {
"result": r,
"probabilities": probs,
"empty": board.get_empty_squares_bitboard(),
"full": board.get_full_squares_bitboard(),
}
r = init(global_board, 0xFFFF00000000FFFF)
r = do_move(global_board, split_b1_a3_c3)
r = undo_last_move(global_board)
!pip install -q flask flask_restful flask-ngrok
from flask import Flask, request, jsonify
from flask_restful import Resource, Api
from flask_ngrok import run_with_ngrok
class Init(Resource):
def get(self):
return {"about": "Init"}
def post(self):
print(request.get_json())
n = request.get_json()["init_basis_state"]
global_board.clear_debug_log()
return init(global_board, int(n))
class DoMove(Resource):
def post(self):
move_json = request.get_json()
t = MoveType(move_json["type"])
# We need to convert square indices to square names.
move_json["square1"] = bit_to_square(move_json["square1"])
move_json["square2"] = bit_to_square(move_json["square2"])
move_json["square3"] = bit_to_square(move_json["square3"])
if t == MoveType.SPLIT_SLIDE or t == MoveType.SPLIT_JUMP:
return do_move(global_board, get_split_move(move_json))
elif t == MoveType.MERGE_JUMP or t == MoveType.MERGE_SLIDE:
return do_move(global_board, get_merge_move(move_json))
else:
return do_move(global_board, get_standard_move(move_json))
class UndoLastMove(Resource):
def post(self):
return undo_last_move(global_board)
app = Flask(__name__)
run_with_ngrok(app)
api = Api(app)
api.add_resource(Init, "/quantumboard/init")
api.add_resource(DoMove, "/quantumboard/do_move")
api.add_resource(UndoLastMove, "/quantumboard/undo_last_move")
@app.route("/")
def home():
return "<h1>Running Flask on Google Colab!</h1>"
# docs_infra: no_execute
app.run()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quantum Chess REST API
Step2: It is possible to play the game in interactive mode, by applying moves to the board. Split the knight on b1 to a3 and c3.
Step3: In Quantum Chess, a move can be uniquely defined by up to 3 squares, a type, and a variant.
Step4: The REST API
Step5: Implement do_move
Step6: Notice that the circuit for the move is printed as well. This is made available in the board debug information. You can also see what happens when initializing the board using a noisy simulator with error mitigation.
Step7: You may notice that the circuit run discarded some of the returned samples due to error mitigation and post selection.
Step8: REST server implementation
Step9: Define the REST endpoints for the webserver
Step10: And start the local webserver
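Once it is running, a client can exercise the endpoints, for example (a sketch; the base URL below is a placeholder, use the ngrok address printed at startup or http://127.0.0.1:5000 locally):
import requests
BASE = 'http://127.0.0.1:5000'  # placeholder; replace with the printed ngrok URL
r = requests.post(BASE + '/quantumboard/init', json={'init_basis_state': str(0xFFFF00000000FFFF)})
print(r.json())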
|
12,565
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pylab as plt
X = [10, 20, 30, 40, 50, 60, 70]
y = [40, 45, 50, 65, 70, 70, 80]
plt.figure(figsize=(4, 2))
plt.scatter(X, y)
plt.xlabel("training time"); plt.ylabel("productivity")
plt.show()
plt.figure(figsize=(6, 3))
plt.scatter(X, y)
plt.xlabel("training time")
plt.ylabel("productivity")
plt.plot((0, 70), (25, 85)); plt.plot((0, 70), (35, 75)); plt.plot((0, 70), (45, 65))
plt.ylim(20, 80); plt.xlim(0, 70)
plt.legend('abc', loc='upper left')
plt.show()
import matplotlib.pylab as plt
import numpy as np
X = np.array([2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
y = np.array([10, 14, 17, 19, 21, 22, 24, 26, 24, 23])
n = len(X)
plt.scatter(X, y)
plt.show()
print(np.dot(X, y))
print(np.dot(X, X))
print(np.sum(X))
print(np.sum(y))
b = (n * np.dot(X,y) - np.sum(X)*np.sum(y)) / (n*np.dot(X, X) - (np.sum(X))**2)
a = np.mean(y) - b * np.mean(X)
print(a)
print(b)
def linear_model(x):
    return b * x + a

print(linear_model(15))
from sklearn.linear_model import LinearRegression
X = np.array([2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
y = np.array([10, 14, 17, 19, 21, 22, 24, 26, 24, 23])
X = X.reshape(len(X), 1)
y = y.reshape(len(y), 1)
model = LinearRegression().fit(X, y)
print(model.coef_)
print(model.intercept_)
XX = np.arange(0, 25)
yy = model.coef_ * XX + model.intercept_
yy = np.array(yy).reshape(25)
plt.plot(XX, yy, "r-")
plt.scatter(X, y)
plt.xlim(0, 22)
plt.show()
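# Quick check (sketch): the fitted model's prediction at x = 15 should match
# the hand-derived value b*15 + a computed earlier.
print(model.predict([[15]]))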
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Looking at the scatter plot, we can easily see that productivity rises as training time increases, so there is a close relationship between training time and productivity.
Step2: The best sample regression line is the one that minimizes the overall estimation error, i.e., the residuals.
Step3: Prediction
Step4: Deriving the regression line with sklearn
|
12,566
|
<ASSISTANT_TASK:>
Python Code:
ph_sel_name = "Dex"
data_id = "12d"
# ph_sel_name = "all-ph"
# data_id = "7d"
from fretbursts import *
init_notebook()
from IPython.display import display
data_dir = './data/singlespot/'
import os
data_dir = os.path.abspath(data_dir) + '/'
assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
from glob import glob
file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)
## Selection for POLIMI 2012-11-26 datatset
labels = ['17d', '27d', '7d', '12d', '22d']
files_dict = {lab: fname for lab, fname in zip(labels, file_list)}
files_dict
ph_sel_map = {'all-ph': Ph_sel('all'), 'Dex': Ph_sel(Dex='DAem'),
'DexDem': Ph_sel(Dex='Dem')}
ph_sel = ph_sel_map[ph_sel_name]
data_id, ph_sel_name
d = loader.photon_hdf5(filename=files_dict[data_id])
d.ph_times_t, d.det_t
d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)
plot_alternation_hist(d)
loader.alex_apply_period(d)
d
d.time_max
d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)
dplot(d, timetrace_bg)
d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa
bs_kws = dict(L=10, m=10, F=7, ph_sel=ph_sel)
d.burst_search(**bs_kws)
th1 = 30
ds = d.select_bursts(select_bursts.size, th1=30)
bursts = (bext.burst_data(ds, include_bg=True, include_ph_index=True)
.round({'E': 6, 'S': 6, 'bg_d': 3, 'bg_a': 3, 'bg_aa': 3, 'nd': 3, 'na': 3, 'naa': 3, 'nda': 3, 'nt': 3, 'width_ms': 4}))
bursts.head()
burst_fname = ('results/bursts_usALEX_{sample}_{ph_sel}_F{F:.1f}_m{m}_size{th}.csv'
.format(sample=data_id, th=th1, **bs_kws))
burst_fname
bursts.to_csv(burst_fname)
assert d.dir_ex == 0
assert d.leakage == 0
print(d.ph_sel)
dplot(d, hist_fret);
# if data_id in ['7d', '27d']:
# ds = d.select_bursts(select_bursts.size, th1=20)
# else:
# ds = d.select_bursts(select_bursts.size, th1=30)
ds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)
n_bursts_all = ds.num_bursts[0]
def select_and_plot_ES(fret_sel, do_sel):
ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)
ds_do = ds.select_bursts(select_bursts.ES, **do_sel)
bpl.plot_ES_selection(ax, **fret_sel)
bpl.plot_ES_selection(ax, **do_sel)
return ds_fret, ds_do
ax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)
if data_id == '7d':
fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)
do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '12d':
fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '17d':
fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '22d':
fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '27d':
fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
n_bursts_do = ds_do.num_bursts[0]
n_bursts_fret = ds_fret.num_bursts[0]
n_bursts_do, n_bursts_fret
d_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)
print ('D-only fraction:', d_only_frac)
dplot(ds_fret, hist2d_alex, scatter_alpha=0.1);
dplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);
def hsm_mode(s):
    """Half-sample mode (HSM) estimator of `s`.

    `s` is a sample from a continuous distribution with a single peak.

    Reference: Bickel, Fruehwirth (2005). arXiv:math/0505419
    """
s = memoryview(np.sort(s))
i1 = 0
i2 = len(s)
while i2 - i1 > 3:
n = (i2 - i1) // 2
w = [s[n-1+i+i1] - s[i+i1] for i in range(n)]
i1 = w.index(min(w)) + i1
i2 = i1 + n
if i2 - i1 == 3:
if s[i1+1] - s[i1] < s[i2] - s[i1 + 1]:
i2 -= 1
elif s[i1+1] - s[i1] > s[i2] - s[i1 + 1]:
i1 += 1
else:
i1 = i2 = i1 + 1
return 0.5*(s[i1] + s[i2])
E_pr_do_hsm = hsm_mode(ds_do.E[0])
print ("%s: E_peak(HSM) = %.2f%%" % (ds.ph_sel, E_pr_do_hsm*100))
E_fitter = bext.bursts_fitter(ds_do, weights=None)
E_fitter.histogram(bins=np.arange(-0.2, 1, 0.03))
E_fitter.fit_histogram(model=mfit.factory_gaussian())
E_fitter.params
res = E_fitter.fit_res[0]
res.params.pretty_print()
E_pr_do_gauss = res.best_values['center']
E_pr_do_gauss
bandwidth = 0.03
E_range_do = (-0.1, 0.15)
E_ax = np.r_[-0.2:0.401:0.0002]
E_fitter.calc_kde(bandwidth=bandwidth)
E_fitter.find_kde_max(E_ax, xmin=E_range_do[0], xmax=E_range_do[1])
E_pr_do_kde = E_fitter.kde_max_pos[0]
E_pr_do_kde
mfit.plot_mfit(ds_do.E_fitter, plot_kde=True, plot_model=False)
plt.axvline(E_pr_do_hsm, color='m', label='HSM')
plt.axvline(E_pr_do_gauss, color='k', label='Gauss')
plt.axvline(E_pr_do_kde, color='r', label='KDE')
plt.xlim(0, 0.3)
plt.legend()
print('Gauss: %.2f%%\n KDE: %.2f%%\n HSM: %.2f%%' %
(E_pr_do_gauss*100, E_pr_do_kde*100, E_pr_do_hsm*100))
nt_th1 = 50
dplot(ds_fret, hist_size, which='all', add_naa=False)
xlim(-0, 250)
plt.axvline(nt_th1)
Th_nt = np.arange(35, 120)
nt_th = np.zeros(Th_nt.size)
for i, th in enumerate(Th_nt):
ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)
nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th
plt.figure()
plot(Th_nt, nt_th)
plt.axvline(nt_th1)
nt_mean = nt_th[np.where(Th_nt == nt_th1)][0]
nt_mean
E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')
E_fitter = ds_fret.E_fitter
E_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
E_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))
E_fitter.fit_res[0].params.pretty_print()
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(E_fitter, ax=ax[0])
mfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))
display(E_fitter.params*100)
ds_fret.fit_E_m(weights='size')
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')
E_kde_w = E_fitter.kde_max_pos[0]
E_gauss_w = E_fitter.params.loc[0, 'center']
E_gauss_w_sig = E_fitter.params.loc[0, 'sigma']
E_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))
E_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr
E_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr
S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)
S_fitter = ds_fret.S_fitter
S_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
S_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(S_fitter, ax=ax[0])
mfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))
display(S_fitter.params*100)
S_kde = S_fitter.kde_max_pos[0]
S_gauss = S_fitter.params.loc[0, 'center']
S_gauss_sig = S_fitter.params.loc[0, 'sigma']
S_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))
S_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr
S_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr
S = ds_fret.S[0]
S_ml_fit = (S.mean(), S.std())
S_ml_fit
weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)
S_mean = np.dot(weights, S)/weights.sum()
S_std_dev = np.sqrt(
np.dot(weights, (S - S_mean)**2)/weights.sum())
S_wmean_fit = [S_mean, S_std_dev]
S_wmean_fit
sample = data_id
variables = ('sample n_bursts_all n_bursts_do n_bursts_fret '
'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '
'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '
'E_pr_do_kde E_pr_do_hsm E_pr_do_gauss nt_mean\n')
variables_csv = variables.replace(' ', ',')
fmt_float = '{%s:.6f}'
fmt_int = '{%s:d}'
fmt_str = '{%s}'
fmt_dict = {**{'sample': fmt_str},
**{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}
var_dict = {name: eval(name) for name in variables.split()}
var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n'
data_str = var_fmt.format(**var_dict)
print(variables_csv)
print(data_str)
# NOTE: The file name should be the notebook name but with .csv extension
with open('results/usALEX-5samples-PR-raw-%s.csv' % ph_sel_name, 'a') as f:
f.seek(0, 2)
if f.tell() == 0:
f.write(variables_csv)
f.write(data_str)
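# Sketch: the accumulated results file can be inspected later with pandas, e.g.
import pandas as pd
pd.read_csv('results/usALEX-5samples-PR-raw-%s.csv' % ph_sel_name)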
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load software and filenames definitions
Step2: Data folder
Step3: List of data files
Step4: Data load
Step5: Laser alternation selection
Step6: We need to define some parameters
Step7: We should check if everithing is OK with an alternation histogram
Step8: If the plot looks good we can apply the parameters with
Step9: Measurements infos
Step10: Or check the measurements duration
Step11: Compute background
Step12: Burst search and selection
Step14: Donor Leakage fit
Step15: Gaussian Fit
Step16: KDE maximum
Step17: Leakage summary
Step18: Burst size distribution
Step19: Fret fit
Step20: Weighted mean of $E$ of each burst
Step21: Gaussian fit (no weights)
Step22: Gaussian fit (using burst size as weights)
Step23: Stoichiometry fit
Step24: The Maximum likelihood fit for a Gaussian population is the mean
Step25: Computing the weighted mean and weighted standard deviation we get
Step26: Save data to file
Step27: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
Step28: This is just a trick to format the different variables
|
12,567
|
<ASSISTANT_TASK:>
Python Code:
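# Score variant that keeps WoB_Server's log2-based biomass term but skips the
# final rounding step (inferred from the function name; see the docstring of
# environmentScoreCubeRoot below for the original reference).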
def environmentScoreNoRounding(speciesData, nodeConfig, biomassData):
numTimesteps = len(biomassData[nodeConfig[0]['nodeId']])
scores = np.empty(numTimesteps)
for timestep in range(numTimesteps):
# Calculate the Ecosystem Score for this timestep
biomass = 0
numSpecies = 0
for node in nodeConfig:
nodeId = node['nodeId']
perUnitBiomass = node['perUnitBiomass']
# Sometimes biomass can go slightly negative.
# Clip to 0 to avoid complex numbers in score calculation.
totalBiomass = max(0, biomassData[nodeId][timestep])
if totalBiomass > 0:
numSpecies += 1
biomass += perUnitBiomass * pow(totalBiomass / perUnitBiomass,
speciesData[nodeId]['trophicLevel'])
if biomass > 0:
biomass = log2(biomass) * 5
scores[timestep] = pow(biomass, 2) + pow(numSpecies, 2)
return scores
def environmentScoreCubeRoot(speciesData, nodeConfig, biomassData):
    """Compute the Ecosystem Score for all timesteps for the given data and return
    the score time series. The calculations are taken from
    model.Ecosystem.updateEcosystemScore() in WoB_Server.
    """
numTimesteps = len(biomassData[nodeConfig[0]['nodeId']])
scores = np.empty(numTimesteps)
for timestep in range(numTimesteps):
# Calculate the Ecosystem Score for this timestep
biomass = 0
numSpecies = 0
for node in nodeConfig:
nodeId = node['nodeId']
perUnitBiomass = node['perUnitBiomass']
# Sometimes biomass can go slightly negative.
# Clip to 0 to avoid complex numbers in score calculation.
totalBiomass = max(0, biomassData[nodeId][timestep])
if totalBiomass > 0:
numSpecies += 1
biomass += perUnitBiomass * pow(totalBiomass / perUnitBiomass,
speciesData[nodeId]['trophicLevel'])
if biomass > 0:
biomass = pow(biomass, 1/3) * 5
scores[timestep] = pow(biomass, 2) + pow(numSpecies, 2)
return scores
def shannonIndex(speciesData, nodeConfig, biomassData):
numTimesteps = len(biomassData[nodeConfig[0]['nodeId']])
scores = np.zeros(numTimesteps)
for timestep in range(numTimesteps):
individualCount = np.empty(len(nodeConfig))
for i, node in enumerate(nodeConfig):
speciesBiomass = max(0, biomassData[node['nodeId']][timestep])
individualBiomass = node['perUnitBiomass']
individualCount[i] = speciesBiomass / individualBiomass
totalIndividuals = individualCount.sum()
for i, node in enumerate(nodeConfig):
if individualCount[i] == 0:
continue
proportion = individualCount[i] / totalIndividuals
scores[timestep] -= proportion * log2(proportion)
return scores
def shannonIndexBiomass(speciesData, nodeConfig, biomassData):
numTimesteps = len(biomassData[nodeConfig[0]['nodeId']])
scores = np.zeros(numTimesteps)
for timestep in range(numTimesteps):
speciesBiomass = np.empty(len(nodeConfig))
for i, node in enumerate(nodeConfig):
speciesBiomass[i] = max(0, biomassData[node['nodeId']][timestep])
totalBiomass = speciesBiomass.sum()
for i, node in enumerate(nodeConfig):
if speciesBiomass[i] <= 0:
continue
proportion = speciesBiomass[i] / totalBiomass
scores[timestep] -= proportion * log2(proportion)
return scores
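# Sketch (an assumption, not WoB_Server code): the "Shannon index times total
# biomass" variant mentioned in the step descriptions could simply scale the
# biomass-based index by the total biomass at each timestep.
def shannonIndexBiomassProduct(speciesData, nodeConfig, biomassData):
    shannon = shannonIndexBiomass(speciesData, nodeConfig, biomassData)
    numTimesteps = len(biomassData[nodeConfig[0]['nodeId']])
    totalBiomass = np.zeros(numTimesteps)
    for node in nodeConfig:
        totalBiomass += np.clip(biomassData[node['nodeId']], 0, None)
    return shannon * totalBiomass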
def avgShannonIndexByTrophicLevel(speciesData, nodeConfig, biomassData):
numTimesteps = len(biomassData[nodeConfig[0]['nodeId']])
scores = np.zeros(numTimesteps)
for timestep in range(numTimesteps):
# Organize species biomass values into lists by trophic level
sb = {} # species biomass by trophic level
for i, node in enumerate(nodeConfig):
trophicLevel = round(speciesData[node['nodeId']]['trophicLevel'])
biomass = max(0, biomassData[node['nodeId']][timestep])
if trophicLevel not in sb:
sb[trophicLevel] = [biomass]
else:
sb[trophicLevel].append(biomass)
# Calculate Shannon index for each trophic level
shannon = np.zeros(len(sb)) # note: the array index is not the trophic level, which is not relevant at this point
for i, biomassList in enumerate(sb.values()):
totalBiomass = sum(biomassList)
for biomass in biomassList:
if biomass <= 0:
continue
proportion = biomass / totalBiomass
shannon[i] -= proportion * log2(proportion)
scores[timestep] = shannon.mean()
if timestep % 100 == 0:
print("timestep {}".format(timestep))
print("sb = {}".format(sb))
print("shannon = {}".format(shannon))
return scores
#score_function = environment_score
score_function = None
csvDir = '/Users/ben/SFSU/thesis/test-data/steadystate-search/3-4-5-7-13-30-31-42-45-49-50-51-52-53-57-65-72-74-75-85/biomass-data'
filenames = glob.glob(os.path.join(csvDir, '*.csv*')) + glob.glob(os.path.join(csvDir, '*.h5'))
# sort by sim number
filenames.sort(key=lambda f: (get_sim_number(f), f))
# sort descending by file size
#filenames.sort(key=lambda f: (-os.path.getsize(f), get_sim_number(f)))
file_basenames = list(map(os.path.basename, filenames))
def plotFile(file_basename):
global last_selected_file
last_selected_file = file_basename
filename = os.path.join(csvDir, file_basename)
plot_biomass_data(filename, score_function, show_legend=True, #figsize=(12,8),
#xlim=(15990, 16010),
ylim=(1e-12, 1e5), logx=False, logy=True)
#ylim=(0, 20000),
#log_scale=False)
plt.show()
try:
selectWidget = interactive(plotFile, file_basename=widgets.Select(
description="File", options=file_basenames, value=last_selected_file))
except:
selectWidget = interactive(plotFile, file_basename=widgets.Select(
description="File", options=file_basenames))
display(selectWidget)
blist = [1751.0, 1415.0]
total = sum(blist)
s = 0
for b in blist:
proportion = b / total
s -= proportion * log2(proportion)
print(s)
y = np.array([32, 16, 8, 4, 2, 1, 1, 1])
x = np.arange(len(y))
dy = (y - np.roll(y, 1))[1:]
plt.plot(x, y, label='y')
plt.plot(x[1:], dy, label='dy')
slope, intercept, r_value, p_value, std_err = stats.linregress(x[1:], dy)
plt.plot(x[1:], x[1:] * slope + intercept, label='linear regression')
plt.legend()
print("slope = {}".format(slope))
print("average = {}".format(np.mean(dy)))
print(20**3)
print(10**3 + 10**3)
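# Cubing is superadditive: one species holding all 20 units of biomass scores
# 20**3 = 8000, while splitting it into two species of 10 scores only
# 10**3 + 10**3 = 2000 -- which is why splitting biomass between species lowers the score.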
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Using cube root instead of log
Step3: Shannon index, based on number of individuals
Step4: Shannon index, based on biomass
Step5: Biomass-based Shannon index times total biomass
Step6: Net production
Step7: Shannon index test
Step8: Net production trend
Step9: Splitting biomass between multiple species in the top trophic level lowers score
|
12,568
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Load the iris dataset
iris = datasets.load_iris()
# Create X from the features
X = iris.data
# Create y from output
y = iris.target
# Remake the variable, keeping all data where the category is not 2.
X = X[y != 2]
y = y[y != 2]
# View the features
X[0:5]
# View the target data
y
# Split the data into test and training sets, with 30% of samples being put into the test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Create a scaler object
sc = StandardScaler()
# Fit the scaler to the training data and transform
X_train_std = sc.fit_transform(X_train)
# Apply the scaler to the test data
X_test_std = sc.transform(X_test)
C = [10, 1, .1, .001]
for c in C:
    # liblinear supports the L1 penalty; fit on the standardized features
    clf = LogisticRegression(penalty='l1', C=c, solver='liblinear')
    clf.fit(X_train_std, y_train)
print('C:', c)
print('Coefficient of each feature:', clf.coef_)
    print('Training accuracy:', clf.score(X_train_std, y_train))
    print('Test accuracy:', clf.score(X_test_std, y_test))
print('')
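# In scikit-learn, C is the *inverse* of regularization strength: smaller C
# means a stronger L1 penalty, driving more coefficients to exactly zero.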
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create The Data
Step2: View The Data
Step3: Split The Data Into Training And Test Sets
Step4: Standardize Features
Step5: Run Logistic Regression With A L1 Penalty With Various Regularization Strengths
|
12,569
|
<ASSISTANT_TASK:>
Python Code:
def ci_pendulo_doble(x, y):
    # note that the link lengths are 2 and 2
l1, l2 = 2, 2
from numpy import arccos, arctan2, sqrt
    # Geometric solution: the law of cosines gives q2, then q1 from the target angle
    c2 = (x**2 + y**2 - l1**2 - l2**2) / (2 * l1 * l2)
    q2 = arccos(c2)
    q1 = arctan2(y, x) - arctan2(l2 * sqrt(1 - c2**2), l1 + l2 * c2)
return q1, q2
from numpy.testing import assert_allclose
assert_allclose(ci_pendulo_doble(4, 0), (0,0))
assert_allclose(ci_pendulo_doble(0, 4), (1.57079632,0))
# Joint angles for the three target points p1 = (0,1), p2 = (1,3), p3 = (3,2)
q11, q21 = ci_pendulo_doble(0, 1)
q12, q22 = ci_pendulo_doble(1, 3)
q13, q23 = ci_pendulo_doble(3, 2)
from numpy.testing import assert_allclose
assert_allclose((q11, q21),(0.25268 , 2.636232), rtol=1e-05, atol=1e-05)
from numpy.testing import assert_allclose
assert_allclose((q12, q22),(0.589988, 1.318116), rtol=1e-05, atol=1e-05)
from numpy.testing import assert_allclose
assert_allclose((q13, q23),(0.14017 , 0.895665), rtol=1e-05, atol=1e-05)
from generacion_trayectorias import grafica_trayectoria
# TODO: build the joint-space trajectories with grafica_trayectoria (2 s per segment):
# (q11, q21) -> (q12, q22) -> (q13, q23) -> (q11, q21), producing the lists
# q1s1/q2s1, q1s2/q2s2 and q1s3/q2s3 that are concatenated below.
raise NotImplementedError()
q1s = q1s1 + q1s2 + q1s3
q2s = q2s1 + q2s2 + q2s3
from numpy.testing import assert_allclose
assert_allclose((q1s[0], q1s[-1]),(0.25268, 0.25268), rtol=1e-05, atol=1e-05)
from numpy.testing import assert_allclose
assert_allclose((q2s[0], q2s[-1]),(2.636232, 2.636232), rtol=1e-05, atol=1e-05)
from matplotlib.pyplot import figure, style
from matplotlib import animation, rc
rc('animation', html='html5')
from numpy import sin, cos, arange
fig = figure(figsize=(8, 8))
axi = fig.add_subplot(111, autoscale_on=False, xlim=(-0.6, 3.1), ylim=(-0.6, 3.1))
linea, = axi.plot([], [], "-o", lw=2, color='gray')
def cd_pendulo_doble(q1, q2):
l1, l2 = 2, 2
    # Forward kinematics: positions of the base, the elbow and the end effector
    xs = [0, l1 * cos(q1), l1 * cos(q1) + l2 * cos(q1 + q2)]
    ys = [0, l1 * sin(q1), l1 * sin(q1) + l2 * sin(q1 + q2)]
return xs, ys
def inicializacion():
    '''This function runs only once and initializes the system'''
linea.set_data([], [])
return linea
def animacion(i):
    '''This function runs for every frame of the GIF'''
    # Compute the pendulum configuration for frame i of the trajectory
    xs, ys = cd_pendulo_doble(q1s[i], q2s[i])
linea.set_data(xs, ys)
return linea
ani = animation.FuncAnimation(fig, animacion, arange(1, len(q1s)), interval=10, init_func=inicializacion)
ani
from numpy.testing import assert_allclose
assert_allclose(cd_pendulo_doble(0, 0), ([0,2,4], [0,0,0]), rtol=1e-05, atol=1e-05)
assert_allclose(cd_pendulo_doble(1.57079632,0), ([0, 0, 0],[0, 2, 4]), rtol=1e-05, atol=1e-05)
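# Note: arccos returns angles in [0, pi], so ci_pendulo_doble picks one of the
# two inverse-kinematics branches; negating q2 (and recomputing q1) would give
# the mirrored elbow configuration.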
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Obtain the joint-space positions, $q_1$ and $q_2$, needed for the end point of the double pendulum to reach the coordinates $p_1 = (0,1)$, $p_2 = (1,3)$ and $p_3 = (3,2)$.
Step2: Generate the trajectories needed for the double pendulum to move from point $p_1$ to point $p_2$ in $2s$, from point $p_2$ to point $p_3$ in $2s$, and from point $p_3$ to point $p_1$ in $2s$.
Step3: Create an animation with the generated trajectories and the functions provided below (some functions are marked with comments where code needs to be added).
|
12,570
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
from phoebe import u # units
logger = phoebe.logger()
b = phoebe.default_binary(contact_binary=True)
b.add_dataset('lc', times=phoebe.linspace(0,0.5,101))
b.run_compute(irrad_method='none', model='no_spot')
b.add_feature('spot', component='primary', feature='spot01', relteff=0.9, radius=20, colat=90, long=-45)
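# Spot parameters (see the PHOEBE docs): relteff is the spot temperature as a
# fraction of the local surface temperature; radius, colat and long are in degrees.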
b.run_compute(irrad_method='none', model='with_spot')
afig, mplfig = b.plot(show=True, legend=True)
b.remove_dataset(kind='lc')
b.remove_model(model=['with_spot', 'no_spot'])
b.add_dataset('mesh', compute_times=b.to_time(0.25), columns='teffs')
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(fc='teffs', ec='face', fcmap='plasma', show=True)
b.set_value('long', value=-30)
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(fc='teffs', ec='face', fcmap='plasma', show=True)
b.set_value('long', value=0.0)
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(fc='teffs', ec='face', fcmap='plasma', show=True)
b.set_value('radius', value=40)
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(fc='teffs', ec='face', fcmap='plasma', show=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new bundle.
Step2: Model without Spots
Step3: Adding Spots
Step4: Comparing Light Curves
Step5: Spots near the "neck"
Step6: Now if we set the long closer to the neck, we'll see it get cropped by the boundary between the two components. If we need a spot that crosses between the two "halves" of the contact, we'd have to add separate spots to each component, with each getting cropped at the boundary.
Step7: If we set long to zero, the spot completely disappears (as there is nowhere in the neck that is still on the surface.
Step8: But if we increase the radius large enough, we'll get a ring.
|
12,571
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import lifelines
import matplotlib.pylab as plt
%matplotlib inline
data = lifelines.datasets.load_dd()
data.head()
data.tail()
from lifelines import KaplanMeierFitter
kmf = KaplanMeierFitter()
# kaplan-meier
# KaplanMeierFitter.fit(event_times, event_observed=None,
# timeline=None, label='KM-estimate',
# alpha=None)
"""
Parameters:
  event_times: an array, or pd.Series, of length n of times that
      the death event occurred at
  event_observed: an array, or pd.Series, of length n -- True if
      the death was observed, False if the event was lost
      (right-censored). Defaults to all True if event_observed is None.
  timeline: set the index of the survival curve to this positively increasing array.
  label: a string to name the column of the estimate.
  alpha: the alpha value in the confidence intervals.
      Overrides the initializing alpha for this call to fit only.
Returns:
  self, with new properties like 'survival_function_'
"""
T = data["duration"]
C = data["observed"]
kmf.fit(T, event_observed=C)
kmf.survival_function_.plot()
plt.title('Survival function of political regimes');
kmf.plot()
kmf.median_
## A leader is elected there is a 50% chance he or she will be gone in 3 years.
ax = plt.subplot(111)
dem = (data["democracy"] == "Democracy")
kmf.fit(T[dem], event_observed=C[dem], label="Democratic Regimes")
kmf.plot(ax=ax, ci_force_lines=True)
kmf.fit(T[~dem], event_observed=C[~dem], label="Non-democratic Regimes")
kmf.plot(ax=ax, ci_force_lines=True)
## ci_force_lines : force the confidence intervals to be line plots
plt.ylim(0,1);
plt.title("Lifespans of different global regimes");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: political leaders
|
12,572
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csir-csiro', 'sandbox-2', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
12,573
|
<ASSISTANT_TASK:>
Python Code:
# some random heights of the family
height = [1.75, 1.65, 1.71, 1.89, 1.79]
# some random weights of the family
weight = [65.4, 59.2, 63.6, 88.4, 68.7]
# Now if we go to calculate BMI
weight / height ** 2
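# NOTE: this raises a TypeError -- plain Python lists do not support
# element-wise arithmetic, which is what motivates NumPy arrays below.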
import numpy as np # selective import
# Convert the following lists to numpy arrays
height = [1.75, 1.65, 1.71, 1.89, 1.79]
weight = [65.4, 59.2, 63.6, 88.4, 68.7]
np_height = np.array( height )
np_weight = np.array( weight )
# Let's confirm this as numpy arrray
type(np_height)
type(np_weight)
bmi = np_weight / np_height ** 2
bmi
# A numpy arary with different types
np.array( [1, 2.5, "are different", True ] )
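# NumPy arrays are homogeneous: everything above is coerced to a common type
# (strings here), so the numbers lose their numeric behavior.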
# a simple python list
py_list = [ 1, 2, 3 ]
# a numpy array
numpy_array = np.array([1, 2, 3])
"""
Remarks:
+ If we add py_list to itself, it generates a longer list (concatenation).
+ Whereas if we add numpy_array to itself, it performs element-wise addition.
Warning:
  Again, be careful when mixing different Python types in a NumPy array.
"""
py_list + py_list
numpy_array + numpy_array
bmi
# get the fourth element from the numpy array "bmi"
print("The bmi of the fourth element is: " + str( bmi[3] ) )
# slice and dice
print("\nThe bmi's from 2nd to 3rd element is: " + str( bmi[2 : 4] ) )
# Specifically for NumPy, there's another way to do list
# subsetting: via "booleans". Here's how.
print("\nList of bmi have bmi larger than 23: " + str( bmi > 23 ) )
# Next, use this boolean arary to do subsetting
print("\nThe element with the largest bmi is: " + str(bmi[ bmi > 23 ]) )
"""
Instructions:
+ Import the "numpy" package as "np", so that you can refer to "numpy" with "np".
+ Use "np.array()" to create a Numpy array from "baseball". Name this array "np_baseball".
+ Print out the type of "np_baseball" to check that you got it right.
"""
# Create list baseball
baseball = [180, 215, 210, 210, 188, 176, 209, 200]
# Import the numpy package as np
import numpy as np
# Create a Numpy array from baseball: np_baseball
np_baseball = np.array(baseball)
print(np_baseball)
# Print out type of np_baseball
print(type( np_baseball) )
"""
Instructions:
+ Create a Numpy array from height. Name this new array "np_height".
+ Print "np_height".
+ Multiply "np_height" by 0.0254 to convert all height measurements from inches to meters.
  - Store the new values in a new array, "np_height_m".
+ Print out "np_height_m" and check if the output makes sense.
"""
# height is available as a regular list
# http://wiki.stat.ucla.edu/socr/index.php/SOCR_Data_MLB_HeightsWeights#References
# Import numpy
import numpy as np
# Create a Numpy array from height: np_height
np_height = np.array( height )
# Print out np_height
print("The Height of the baseball players are: " + str( np_height ) )
# Convert np_height to m: np_height_m
np_height_m = np_height * 0.0254 # an inch is 0.0254 meters
# Print np_height_m
print("\nThe Height of the baseball players in meters are: " + str( np_height_m ) )
"""
Instructions:
+ Create a Numpy array from the weight list with the correct units.
  - Multiply by 0.453592 to go from pounds to kilograms.
  - Store the resulting Numpy array as "np_weight_kg".
+ Use np_height_m and np_weight_kg to calculate the BMI of each player.
  - Use the following equation:
        BMI = weight (kg) / height (m) ** 2
  - Save the resulting numpy array as "bmi".
+ Print out "bmi".
"""
# height and weight are available as a regular lists
# Import numpy
import numpy as np
# Create array from height with correct units: np_height_m
np_height_m = np.array(height) * 0.0254
# Create array from weight with correct units: np_weight_kg
np_weight_kg = np.array( weight ) * 0.453592
# Calculate the BMI: bmi
bmi = np_weight_kg / np_height_m ** 2
# Print out bmi
print("\nThe Bmi of all the baseball players are: " + str( bmi ) )
"""
Instructions:
+ Create a boolean Numpy array:
  - the element of the array should be "True"
    if the corresponding baseball player's BMI is below 21.
  - You can use the "<" operator for this.
  - Name the array "light". Print the array "light".
+ Print out a Numpy array with the BMIs of all baseball players whose BMI is below 21.
  - Use "light" inside square brackets to do a selection on the bmi array.
"""
# height and weight are available as a regular lists
# Import numpy
import numpy as np
# Calculate the BMI: bmi
np_height_m = np.array(height) * 0.0254
np_weight_kg = np.array(weight) * 0.453592
bmi = np_weight_kg / (np_height_m ** 2)
# Create the light array
light = np.array( bmi < 21 )
# Print out light
print("\nLightweight baseball players" + str( light ) )
# Print out BMIs of all baseball players whose BMI is below 21
print(bmi[ light < 21 ])
"""
Instructions:
+ Subset np_weight: print out the element at index 50.
+ Print out a sub-array of np_height: it contains the elements at index 100 up to and including index 110.
"""
# height and weight are available as a regular lists
# Import numpy
import numpy as np
# Store weight and height lists as numpy arrays
np_weight = np.array(weight)
np_height = np.array(height)
# Print out the weight at index 50
# Ans: print(np_weight[50])
# Print out sub-array of np_height: index 100 up to and including index 110
# Ans: print(np_height[100 : 111])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solution
Step2: Note
Step4: Numpy
Step6: Numpy Subsetting
Step8: Exercise
Step10: 2. Baseball player's height
Step12: 3. Baseball player's BMI
Step14: 4. Leightweight baseball players
Step16: 5. Numpy Side Effect
|
12,574
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import statsmodels.api as sm
import numpy as np
import pandas as pd
data = sm.datasets.sunspots.load()
from datetime import datetime
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
endog = pd.Series(data.endog, index=dates)
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
print(ar_res.data.predict_dates)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting started
Step2: Right now an annual date series must be datetimes at the end of the year.
Step3: Using Pandas
Step4: Instantiate the model
Step5: Out-of-sample prediction
Step6: Using explicit dates
Step7: This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
|
12,575
|
<ASSISTANT_TASK:>
Python Code:
if boolean-expression:
statements-when-true
else:
    statements-when-false
x = 15
y = 20
z = 2
x > y
z*x <= y
y >= x-z
z*10 == x
raining = False
snowing = True
age = 45
age < 18 and raining
age >= 18 and not snowing
not snowing or not raining
age == 45 and not snowing
if boolean-expression1:
statements-when-exp1-true
elif boolean-expression2:
statements-when-exp2-true
elif boolean-expression3:
statements-when-exp3-true
else:
statements-none-are-true
x = int(input("enter an integer"))
# one single statement. only one block executes
if x>10:
print("A:bigger than 10")
elif x>20:
print("A:bigger than 20")
# Independent if's, each True Boolean executes a block
if x>10:
print("B:bigger than 10")
if x>20:
print("B:bigger than 20")
if x > 20:
if y == 4:
print("One")
elif y > 4:
print("Two")
else:
print("Three")
else:
print("Four")
try:
statements-which
might-throw-an-error
except errorType1:
code-when-Type1-happens
except errorType2:
code-when-Type2-happens
finally:
code-happens-regardless
try:
x = float(input("Enter a number: "))
if x > 0:
y = "a"
else:
y = "b"
except ValueError:
y = "c"
print(y)
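# For example, entering "2.5" prints "a", entering "-1" prints "b",
# and a non-numeric input such as "abc" triggers the ValueError branch ("c").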
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python’s Relational Operators
Step2: A. 4
Step3: A. 4
Step4: elif versus a series of if statements
Step5: Check Yourself
Step6: A. One
Step7: Watch Me Code 2
|
12,576
|
<ASSISTANT_TASK:>
Python Code:
[10, 20, 30, 40]
['dog', 'fish', 'bird']
['bob', 3.14, 42, ['sam', 55]]
cheeses = ['Chedder', 'Pepper Jack', 'Queso Fresca']
grades = [99, 84, 91]
empty = []
cheeses = ['Chedder', 'Pepper Jack', 'Queso Fresca']
cheeses[0] = 'Gouda'
print( cheeses )
for cheese in cheeses:
print( cheese )
numbers = [1, 2, 3]
for i in range( len( numbers ) ):
numbers[i] = numbers[i] * 2
print( numbers )
nested_list = ['spam', 1, [ 'Brie', 'Roquefort' , 'Pol le Veq'], [1, 2, 3] ]
len( nested_list )
a = [ 1, 2, 3 ]
b = [ 4, 5, 6 ]
c = a + b
print( c )
d = [ 0 ] * 4
print( d )
e = [ 1, 2, 3 ] * 3
print( e )
f = [ 'a', 'b', 'c', 'd', 'e', 'f' ]
print( f[1:3] )
print( f[:4] )
print( f[3:] )
f[1:3] = [ 'x', 'y' ]
print( f )
list1 = [ 'a', 'b', 'c' ]
list1.append( 'd' )
print( list1 )
list2 = [ 'a', 'b', 'c' ]
list3 = [ 'd', 'e', ]
list2.extend( list3 )
print( list2 )
print( list3 )
list4 = [ 'd', 'c', 'e', 'a', 'b' ]
list4.sort()
print( list4 )
def add_all( a_list ):
total = 0
for value in a_list:
total += value
return total
list_of_numbers = [ 1, 2, 3 ]
sum( list_of_numbers )
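# The built-in sum() performs the same reduction as add_all() above.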
def capitalize_all( a_list ):
result = []
for a_string in a_list:
result.append( a_string.capitalize() )
return result
words = [ 'life', 'the', 'universe', 'and', 'everything' ]
capitalized_words = capitalize_all( words )
print( capitalized_words )
def only_upper( a_list ):
result = []
for a_string in a_list:
if( a_string.isupper() ):
result.append( a_string )
return result
words = [ 'LIFE', 'The', 'uNiverse', 'AND', 'everything' ]
some_words = only_upper( words )
print( some_words )
a_list = [ 'a', 'b', 'c' ]
value = a_list.pop( 1 )
print( a_list )
print( value )
a_list = [ 'a', 'b', 'c' ]
del a_list[1]
print( a_list )
a_list = [ 'a', 'b', 'c', 'd', 'e', 'f' ]
del a_list[1:5]
print( a_list )
a_list = [ 'a', 'b', 'c', 'a', 'b', 'c' ]
a_list.remove( 'b' )
print( a_list )
a_string = 'spam'
a_list = list( a_string )
print( a_list )
a_string = 'pining for the fjords'
a_list = a_string.split()
print( a_list )
a_string = 'spam-spam-spam'
delimiter = '-'
a_string.split( delimiter )
a_list = [ 'pining', 'for', 'the', 'fjords' ]
delimiter = ' '
delimiter.join( a_list )
a = 'banana'
b = 'banana'
a is b
a = [ 1, 2, 3 ]
b = [ 1, 2, 3 ]
a is b
a = [ 1, 2, 3 ]
b = a
b[0] = 17
print( a )
def delete_head( a_list ):
del a_list[0]
letters = [ 'a', 'b', 'c' ]
delete_head( letters )
print( letters )
list1 = [ 1, 2 ]
list2 = list1.append( 3 )
print( list1 )
print( list2 )
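# Gotcha: append() mutates list1 in place and returns None, so list2 is None here.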
def cumulative_sum( a_list ):
    # Running total: element i of the result is the sum of a_list[0..i]
    total = 0
    result = []
    for value in a_list:
        total += value
        result.append( total )
    return result
numbers = [ 1, 2, 3 ]
result = cumulative_sum( numbers )
# The answer should be:
# [ 1, 3, 6 ]
def chop( a_list ):
    # Modify the list in place; the function implicitly returns None
    del a_list[0]
    del a_list[-1]
numbers = [ 1, 2, 3, 4 ]
chop( numbers )
print( numbers )
# Should print: [ 2, 3 ]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The elements of a list don't have to be the same data type in Python (in other languages they do)
Step2: As you can see, lists can be nested
Step3: Lists are mutable
Step4: Note that like strings, indices start at 0
Step5: However, if you need the index, you need to use the range and len functions
Step6: A for loop over an empty list never executes the body of the loop
Step7: List operations
Step8: Similarly, the * operator repeats a list a specified number of times
Step9: List slices
Step10: Just as with strings, if you omit the first index the slice starts at the beginning
Step11: List methods
Step12: extend takes a list as an argument and appends all of the elements
Step13: Note that list3 is unmodified
Step14: All of these list methods are void
Step15: As we have seen before, the += operator updates a variable
Step16: A function that combines a sequence of elements into a single value is sometimes called a reduce function
Step17: This is sometimes called a map function since it "maps" a function onto each of the elements in a sequence
Step18: Deleting elements
Step19: Note that the function returns the deleted element
Step20: del can also be used with a slice index to remove more than one element
Step21: If you know the element you want to remove, but not the index, use the remove operator
Step22: Lists and strings
Step23: Since it is an operator, don’t use list as a variable name
Step24: By default, the delimiter is a space
Step25: join is the inverse of split
Step26: Objects and values
Step27: Note that this is different from the values of the two objects being equivalent
Step28: In the first example, Python only create one string object
Step29: Be careful because this can cause problems if you aren’t paying attention
Step30: See the stack diagram on pg. 97 of the textbook
Step31: Debugging
Step32: Write a function called chop that takes a list and modifies it by removing the first and last element. The function should return None.
|
12,577
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+git://github.com/lindermanlab/ssm-jax-refactor.git
import ssm
import jax.random as jr
import jax.numpy as np
import matplotlib.pyplot as plt
from tensorflow_probability.substrates import jax as tfp
from ssm.hmm import BernoulliHMM
from ssm.plots import gradient_cmap
from ssm.utils import find_permutation
import warnings
import seaborn as sns
sns.set_style("white")
sns.set_context("talk")
color_names = ["windows blue", "red", "amber", "faded green", "dusty purple", "orange"]
colors = sns.xkcd_palette(color_names)
cmap = gradient_cmap(colors)
def plot_transition_matrix(transition_matrix):
plt.imshow(transition_matrix, vmin=0, vmax=1, cmap="Greys")
plt.xlabel("next state")
plt.ylabel("current state")
plt.colorbar()
plt.show()
def compare_transition_matrix(true_matrix, test_matrix):
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
out = axs[0].imshow(true_matrix, vmin=0, vmax=1, cmap="Greys")
axs[1].imshow(test_matrix, vmin=0, vmax=1, cmap="Greys")
axs[0].set_title("True Transition Matrix")
axs[1].set_title("Test Transition Matrix")
cax = fig.add_axes(
[
axs[1].get_position().x1 + 0.07,
axs[1].get_position().y0,
0.02,
axs[1].get_position().y1 - axs[1].get_position().y0,
]
)
plt.colorbar(out, cax=cax)
plt.show()
def plot_hmm_data(obs, states):
lim = 1.01 * abs(obs).max()
time_bins, obs_dim = obs.shape
plt.figure(figsize=(8, 3))
plt.imshow(
states[None, :],
aspect="auto",
cmap=cmap,
vmin=0,
vmax=len(colors) - 1,
extent=(0, time_bins, -lim, (obs_dim) * lim),
)
for d in range(obs_dim):
plt.plot(obs[:, d] + lim * d, "-k")
plt.xlim(0, time_bins)
plt.xlabel("time")
plt.yticks(lim * np.arange(obs_dim), ["$x_{}$".format(d + 1) for d in range(obs_dim)])
plt.title("Simulated data from an HMM")
plt.tight_layout()
def plot_posterior_states(Ez, states, perm):
plt.figure(figsize=(25, 5))
plt.imshow(Ez.T[perm], aspect="auto", interpolation="none", cmap="Greys")
plt.plot(states, label="True State")
plt.plot(Ez.T[perm].argmax(axis=0), "--", label="Predicted State")
plt.xlabel("time")
plt.ylabel("latent state")
# plt.legend(bbox_to_anchor=(1,1))
plt.title("Predicted vs. Ground Truth Latent State")
# plt.show()
num_states = 5
num_channels = 10
transition_matrix = 0.90 * np.eye(num_states) + 0.10 * np.ones((num_states, num_states)) / num_states
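# A "sticky" transition matrix: most probability mass stays on the current
# state (0.92 on the diagonal here), with the rest spread uniformly.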
true_hmm = BernoulliHMM(
num_states, num_emission_dims=num_channels, transition_matrix=transition_matrix, seed=jr.PRNGKey(0)
)
plot_transition_matrix(true_hmm.transition_matrix)
rng = jr.PRNGKey(0)
num_timesteps = 500
states, data = true_hmm.sample(rng, num_timesteps)
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(20, 8))
axs[0].imshow(data.T, aspect="auto", interpolation="none")
# axs[0].set_ylabel("neuron")
axs[0].set_title("Observations")
axs[1].plot(states)
axs[1].set_title("Latent State")
axs[1].set_xlabel("time")
axs[1].set_ylabel("state")
plt.savefig("bernoulli-hmm-data.pdf")
plt.savefig("bernoulli-hmm-data.png")
plt.show()
test_hmm = BernoulliHMM(num_states, num_channels, seed=jr.PRNGKey(32))
lps, test_hmm, posterior = test_hmm.fit(data, method="em", tol=-1)
# Plot the log probabilities
plt.plot(lps)
plt.xlabel("iteration")
plt.ylabel("log likelihood")
test_hmm.transition_matrix
# Compare the transition matrices
compare_transition_matrix(true_hmm.transition_matrix, test_hmm.transition_matrix)
plt.savefig("bernoulli-hmm-transmat-comparison.pdf")
# Posterior distribution
Ez = posterior.expected_states.reshape(-1, num_states)
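# The HMM's state labels are arbitrary, so align them with the true states before comparing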
perm = find_permutation(states, np.argmax(Ez, axis=-1))
plot_posterior_states(Ez, states, perm)
plt.savefig("bernoulli-hmm-state-est-comparison.pdf")
plt.savefig("bernoulli-hmm-state-est-comparison.png")
plt.show()
rng = jr.PRNGKey(0)
num_timesteps = 500
num_trials = 5
all_states, all_data = true_hmm.sample(rng, num_timesteps, num_samples=num_trials)
# Now we have a batch dimension of size `num_trials`
print(all_states.shape)
print(all_data.shape)
lps, test_hmm, posterior = test_hmm.fit(all_data, method="em", tol=-1)
# plot marginal log probabilities
plt.title("Marginal Log Probability")
plt.ylabel("lp")
plt.xlabel("idx")
plt.plot(lps / all_data.size)  # normalize by the size of the multi-trial dataset
compare_transition_matrix(true_hmm.transition_matrix, test_hmm.transition_matrix)
# For the first few trials, let's see how good our predicted states are
for trial_idx in range(3):
print("=" * 5, f"Trial: {trial_idx}", "=" * 5)
Ez = posterior.expected_states[trial_idx]
states = all_states[trial_idx]
perm = find_permutation(states, np.argmax(Ez, axis=-1))
plot_posterior_states(Ez, states, perm)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports and Plotting Functions
Step2: Bernoulli HMM
Step3: From the true model, we can sample synthetic data
Step4: Let's view the synthetic data
Step5: Fit HMM using exact EM update
Step6: Fit Bernoulli Over Multiple Trials
|
12,578
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from datetime import datetime
import trulia.stats
import geocoder
import json
from datetime import timedelta
from collections import defaultdict
import time
import requests
from statsmodels.graphics import tsaplots
import ConfigParser as cp
cparser = cp.ConfigParser()
cparser.readfp(open('config.properties'))
api_key = cparser.get('weather', 'api_key')
cache = defaultdict(str)
g = geocoder.google('Chapel Hill, NC')
sd = '2010-01-01'
ed = '2016-04-16'
sd2 = pd.to_datetime(sd).to_datetime()
ed2 = pd.to_datetime(ed).to_datetime()
# This is long running, so I like to cache it, walk away, and come back to my nice dataset
x = sd2 + timedelta(hours=9)
while x < datetime.now():
url = 'https://api.forecast.io/forecast/{}/{},{},{}'.format(api_key, g.lat, g.lng, x.strftime("%Y-%m-%dT%H:%M:%S"))
if cache[url] == '':
r2 = requests.get(url)
time.sleep(.2)
resp = json.loads(r2.content)
cache[url] = resp
x = x + timedelta(days=1)
times = []
temps = []
x = sd2 + timedelta(hours=9)
while x < datetime.now():
url = 'https://api.forecast.io/forecast/{}/{},{},{}'.format(api_key, g.lat, g.lng, x.strftime("%Y-%m-%dT%H:%M:%S"))
resp = cache[url]
times.append(x)
temps.append(resp['currently']['temperature'])
x = x + timedelta(days=1)
df2 = pd.DataFrame({'time': times, 'temp': temps})
df2.set_index('time', inplace=True)
plt.figure(figsize=(15,5))
plt.plot(df2)
plt.title('Chapel Hill, NC weather')
plt.show()
fig = tsaplots.plot_acf(df2[0:700], ax=None)
fig.set_figwidth(20)
fig.set_figheight(5)
fig = tsaplots.plot_pacf(df2[0:400], ax=None)
fig.set_figwidth(20)
fig.set_figheight(5)
cparser = cp.ConfigParser()
cparser.readfp(open('config.properties'))
tkey = cparser.get('trulia', 'key')
zc = '90008'
data = trulia.stats.TruliaStats(tkey).get_zip_code_stats(zip_code=zc, start_date=sd, end_date=ed)
weeks = []
medians = []
for week in data['listingStats']['listingStat']:
weeks.append(week['weekEndingDate'])
medians.append(week['listingPrice']['subcategory'][0]['medianListingPrice'])
df = pd.DataFrame({'week': weeks, 'medianPrice': medians})
df['week'] = pd.to_datetime(df['week'])
df['medianPrice'] = df['medianPrice'].astype(float)
df.sort_values('week', inplace=True)  # DataFrame.sort() was removed in newer pandas
df.set_index('week', inplace=True)
plt.figure(figsize=(15,5))
plt.plot(df)
plt.ylabel('Median Listing Price')
plt.gca().get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
plt.show()
fig = tsaplots.plot_acf(df, ax=None)
fig.set_figwidth(20)
fig.set_figheight(5)
fig = tsaplots.plot_pacf(df, ax=None)
fig.set_figwidth(20)
fig.set_figheight(5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below is a time series of the weather in Chapel Hill, NC every morning over a few years. You can clearly see an annual cyclic pattern, which should be no suprise to anyone. Yet, you can also see a fair amount of variance from day to day. Even if you de-trend the annual cycle, we can see that this would not be enough for yesterday's temperature to perfectly predict today's weather.
Step2: Below is a correlogram of the ACF (auto-correlative function). For very low values of lag (comparing the most recent temperature measurement to the values of previous days), we can see a quick drop-off. This tells us that weather correlates very highly, but decliningly so, with recent days.
Step3: For that reason, we also want to review the PACF (partial auto-correlative function) which subtracts the correlation of previous days for each lag so that we get an estimate of what each of those days actually contributes to the most recent observation. In the plots below of the same data, we see all the seasonal and annual correlations disappear. We expect this because most of the information about how the weather depends on the past is already contained in the most recent few days.
Step4: The boundaries shown in the above plots represent a measure of statistical significant. Any points outside this rang are considered statistically significant. Those below it are not.
Step5: The plot below shows the time series of the median listing price (note, that's not the same as the sale price) on a daily basis over the past few years.
Step6: Let's first take a look at it's ACF below. For price, we see (no surprise) that recent listing prices are pretty good predictors of current listing prices. Unless some catastrophe or major event (like discovery of oil or a large gold vein) changed things overnight, home prices should have relatively stable short term prices, and therefore, be very auto-correlative.
Step7: As we did previously, we now want to look at the PACF (below) which shows us that the two most recent days have the most useful information. Although not surprising, I was wondering if we might find some interesting effects related to houses being listed on weekdays vs. weekends, or at specific times of the month. However, it seems that when dealing with such large amounts of money, people have a bit more patience. Perhaps selling a car or a smaller item might show some periodic lags, but the home prices do not.
|
12,579
|
<ASSISTANT_TASK:>
Python Code:
%tensorflow_version 1.x
!curl -Lo deepchem_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import deepchem_installer
%time deepchem_installer.install(version='2.3.0')
import deepchem as dc
tasks, datasets, transformers = dc.molnet.load_muv()
train_dataset, valid_dataset, test_dataset = datasets
train_smiles = train_dataset.ids
valid_smiles = valid_dataset.ids
tokens = set()
for s in train_smiles:
tokens = tokens.union(set(c for c in s))
tokens = sorted(list(tokens))
from deepchem.models.optimizers import Adam, ExponentialDecay
max_length = max(len(s) for s in train_smiles)
batch_size = 100
batches_per_epoch = len(train_smiles)/batch_size
model = dc.models.SeqToSeq(tokens,
tokens,
max_length,
encoder_layers=2,
decoder_layers=2,
embedding_dimension=256,
model_dir='fingerprint',
batch_size=batch_size,
learning_rate=ExponentialDecay(0.004, 0.9, batches_per_epoch))
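# The learning rate starts at 0.004 and is multiplied by 0.9 once per epoch
# (i.e. every batches_per_epoch optimization steps).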
def generate_sequences(epochs):
for i in range(epochs):
for s in train_smiles:
yield (s, s)
model.fit_sequences(generate_sequences(40))
predicted = model.predict_from_sequences(valid_smiles[:500])
count = 0
for s,p in zip(valid_smiles[:500], predicted):
if ''.join(p) == s:
count += 1
print('reproduced', count, 'of 500 validation SMILES strings')
train_embeddings = model.predict_embeddings(train_smiles)
train_embeddings_dataset = dc.data.NumpyDataset(train_embeddings,
train_dataset.y,
train_dataset.w,
train_dataset.ids)
valid_embeddings = model.predict_embeddings(valid_smiles)
valid_embeddings_dataset = dc.data.NumpyDataset(valid_embeddings,
valid_dataset.y,
valid_dataset.w,
valid_dataset.ids)
classifier = dc.models.MultitaskClassifier(n_tasks=len(tasks),
n_features=256,
layer_sizes=[512])
classifier.fit(train_embeddings_dataset, nb_epoch=10)
import numpy as np
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode="classification")
train_score = classifier.evaluate(train_embeddings_dataset, [metric], transformers)
valid_score = classifier.evaluate(valid_embeddings_dataset, [metric], transformers)
print('Training set ROC AUC:', train_score)
print('Validation set ROC AUC:', valid_score)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's start by loading the data. We will use the MUV dataset. It includes 74,501 molecules in the training set, and 9313 molecules in the validation set, so it gives us plenty of SMILES strings to work with.
Step2: We need to define the "alphabet" for our SeqToSeq model, the list of all tokens that can appear in sequences. (It's also possible for input and output sequences to have different alphabets, but since we're training it as an autoencoder, they're identical in this case.) Make a list of every character that appears in any training sequence.
Step3: Create the model and define the optimization method to use. In this case, learning works much better if we gradually decrease the learning rate. We use an ExponentialDecay to multiply the learning rate by 0.9 after each epoch.
Step4: Let's train it! The input to fit_sequences() is a generator that produces input/output pairs. On a good GPU, this should take a few hours or less.
Step5: Let's see how well it works as an autoencoder. We'll run the first 500 molecules from the validation set through it, and see how many of them are exactly reproduced.
Step6: Now we'll try using the encoder as a way to generate molecular fingerprints. We compute the embedding vectors for all molecules in the training and validation datasets, and create new datasets that have those as their feature vectors. The amount of data is small enough that we can just store everything in memory.
Step7: For classification, we'll use a simple fully connected network with one hidden layer.
Step8: Find out how well it worked. Compute the ROC AUC for the training and validation datasets.
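As an aside not in the original notebook, the learned embeddings can also be used directly for similarity search; a minimal sketch, assuming train_embeddings from the code above is a NumPy array of shape (n_molecules, 256):
import numpy as np
query = train_embeddings[0]                      # embedding of the first molecule
norms = np.linalg.norm(train_embeddings, axis=1)
sims = (train_embeddings @ query) / (norms * np.linalg.norm(query))  # cosine similarity
nearest = np.argsort(-sims)[:5]                  # five most similar molecules
print(nearest, sims[nearest])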
|
12,580
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
stats = pd.read_csv(r'data/Seasons_Stats.csv', index_col=0)
stats_clean = stats.drop(['blanl', 'blank2', 'Tm'], axis=1)
stats_clean.head()
players = pd.read_csv(r'data/players.csv', index_col=0)
players.head(10)
data = pd.merge(stats_clean, players[['Player', 'height', 'weight']], left_on='Player', right_on='Player', right_index=False,
how='left', sort=False).fillna(value=0)
data = data[~(data['Pos']==0) & (data['MP'] > 200)]
data.reset_index(inplace=True, drop=True)
data['Player'] = data['Player'].str.replace('*', '', regex=False)  # regex=False treats '*' literally
totals = ['PER', 'OWS', 'DWS', 'WS', 'OBPM', 'DBPM', 'BPM', 'VORP', 'FG', 'FGA', '3P', '3PA', '2P', '2PA', 'FT', 'FTA',
'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']
for col in totals:
data[col] = 36 * data[col] / data['MP']
data.tail()
X = data.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = data['Pos'].as_matrix()
encoder = LabelBinarizer()
y_cat = encoder.fit_transform(y)
nlabels = len(encoder.classes_)
scaler =StandardScaler()
Xnorm = scaler.fit_transform(X)
stats2017 = (data['Year'] == 2017)
X_train = Xnorm[~stats2017]
y_train = y_cat[~stats2017]
X_test = Xnorm[stats2017]
y_test = y_cat[stats2017]
model = Sequential()
model.add(Dense(40, activation='relu', input_dim=46))
model.add(Dropout(0.5))
model.add(Dense(30, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nlabels, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.
model.fit(X_train, y_train, epochs=200, batch_size=128, validation_split=0.2, verbose=1)
model.test_on_batch(X_test, y_test, sample_weight=None)
# Production model, using all data
model.fit(X_train, y_train, epochs=200, batch_size=128, validation_split=0, verbose=1)
first_team_members = ['Russell Westbrook', 'James Harden', 'Anthony Davis', 'LeBron James', 'Kawhi Leonard']
first_team_stats = data[[((x[1]['Player'] in first_team_members) & (x[1]['Year']==2017)) for x in data.iterrows()]]
first_team_stats
pd.DataFrame(index=first_team_stats.loc[:, 'Player'].values, data={'Real': first_team_stats.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm[first_team_stats.index, :]))})
mvp = [(1956, 'Bob Pettit'), (1957, 'Bob Cousy'), (1958, 'Bill Russell'), (1959, 'Bob Pettit'),
(1960, 'Wilt Chamberlain'), (1961, 'Bill Russell'), (1962, 'Bill Russell'), (1963, 'Bill Russell'),
(1964, 'Oscar Robertson'), (1965, 'Bill Russell'), (1966, 'Wilt Chamberlain'), (1967, 'Wilt Chamberlain'),
(1968, 'Wilt Chamberlain'), (1969, 'Wes Unseld'), (1970, 'Willis Reed'), (1971, 'Lew Alcindor'),
(1972, 'Kareem Abdul-Jabbar'), (1973, 'Dave Cowens'), (1974, 'Kareem Abdul-Jabbar'), (1975, 'Bob McAdoo'),
(1976, 'Kareem Abdul-Jabbar'), (1977, 'Kareem Abdul-Jabbar'), (1978, 'Bill Walton'), (1979, 'Moses Malone'),
(1980, 'Kareem Abdul-Jabbar'), (1981, 'Julius Erving'), (1982, 'Moses Malone'), (1983, 'Moses Malone'),
(1984, 'Larry Bird'), (1985, 'Larry Bird'), (1986, 'Larry Bird'), (1987, 'Magic Johnson'),
(1988, 'Michael Jordan'), (1989, 'Magic Johnson'), (1990, 'Magic Johnson'), (1991, 'Michael Jordan'),
(1992, 'Michael Jordan'), (1993, 'Charles Barkley'), (1994, 'Hakeem Olajuwon'), (1995, 'David Robinson'),
(1996, 'Michael Jordan'), (1997, 'Karl Malone'), (1998, 'Michael Jordan'), (1999, 'Karl Malone'),
(2000, 'Shaquille O\'Neal'), (2001, 'Allen Iverson'), (2002, 'Tim Duncan'), (2003, 'Tim Duncan'),
(2004, 'Kevin Garnett'), (2005, 'Steve Nash'), (2006, 'Steve Nash'), (2007, 'Dirk Nowitzki'),
(2008, 'Kobe Bryant'), (2009, 'LeBron James'), (2010, 'LeBron James'), (2011, 'Derrick Rose'),
(2012, 'LeBron James'), (2013, 'LeBron James'), (2014, 'Kevin Durant'), (2015, 'Stephen Curry'),
(2016, 'Stephen Curry')]
mvp_stats = pd.concat([data[(data['Player'] == x[1]) & (data['Year']==x[0])] for x in mvp], axis=0)
mvp_stats
mvp_pred = pd.DataFrame(index=mvp_stats.loc[:, 'Player'].values, data={'Real': mvp_stats.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm[mvp_stats.index, :]))})
mvp_pred
curry2017 = data[(data['Player'] == 'Stephen Curry') & (data['Year']==2017)]
pettit1956 = data[(data['Player'] == 'Bob Pettit') & (data['Year']==1956)]
time_travel_curry = pd.concat([curry2017 for year in range(1956, 2018)], axis=0)
time_travel_curry['Year'] = range(1956, 2018)
X = time_travel_curry.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = time_travel_curry['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
time_travel_curry_pred = pd.DataFrame(index=time_travel_curry.loc[:, 'Year'].values,
data={'Real': time_travel_curry.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm))})
time_travel_pettit = pd.concat([pettit1956 for year in range(1956, 2018)], axis=0)
time_travel_pettit['Year'] = range(1956, 2018)
X = time_travel_pettit.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = time_travel_pettit['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
time_travel_pettit_pred = pd.DataFrame(index=time_travel_pettit.loc[:, 'Year'].values,
data={'Real': time_travel_pettit.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm))})
pd.concat([time_travel_curry_pred,time_travel_pettit_pred],axis=1,keys=['Stephen Curry','Bob Pettit'])
magic = data[(data['Player'] == 'Magic Johnson')]
jordan = data[(data['Player'] == 'Michael Jordan')]
# Magic
X = magic.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = magic['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
magic_pred = pd.DataFrame(index=magic.loc[:, 'Age'].values,
data={'Real': magic.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm))})
# Jordan
X = jordan.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = jordan['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
jordan_pred = pd.DataFrame(index=jordan.loc[:, 'Age'].values,
data={'Real': jordan.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm))})
pd.concat([magic_pred,jordan_pred],axis=1,keys=['Magic Johnson','Michael Jordan'])
first_team_stats
multiplier = np.arange(0.8,1.2,0.02)
growing_predicted = []
for p in first_team_stats.iterrows():
growing = pd.concat([p[1].to_frame().T for x in multiplier], axis=0)
growing['height'] = growing['height'] * multiplier
growing['weight'] = growing['weight'] * (multiplier ** 3)
X = growing.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = growing['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
growing_predicted.append(pd.DataFrame(index=multiplier, data={'height': growing.loc[:, 'height'].values,
'Real': growing.loc[:, 'Pos'].values, 'Predicted':encoder.inverse_transform(model.predict(Xnorm))}))
pd.concat(growing_predicted,axis=1,keys=first_team_stats['Player'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preparation
Step2: The file Seasons_Stats.csv contains the statistics of all players since 1950. First, we drop a couple of blank columns, and the "Tm" column, which contains the team.
Step3: A second file, players.csv, contains static information for each player, such as height, weight, etc.
Step4: We merge both tables, and do some data cleaning
Step5: We will train a neural network with this data, to try to predict the position of each player.
Step6: Neural network training
Step7: The model performs well both for the validation and the test sets. 65% might not seem like a lot, but it is satisfying enough for our problem, where all the labels are quite subjective (was Larry Bird an "SF-PF" or a "PF-SF"? Nobody can tell).
Step8: Predicting the positions of the First NBA Team of 2017
Step9: The model gets four of the five right. It's even more interesting that the one it gets wrong, Anthony Davis, can play in both the PF and C positions, and that in the last season he played more as a power forward than as a center, as the model predicts.
Step10: The model gets most of the players right, and the errors are always for a contiguous position (it is interesting that the model does this without having been provided with any information about the distances between the labels).
Step11: Curry is labeled as a point guard (his real position) from 1973 until today, and as a shooting guard before that, perhaps because of his height (191cm), or perhaps because he is too much of a scorer. Bob Pettit is labeled as a center until 1967, and as a power forward after that (he played both roles, but nowadays he would have difficulty playing center, and would surely be a forward, perhaps even a small forward).
Step12: The model is able to detect the conversion of Jordan into a forward at the end of his career, but not the return of Magic as a power forward. Also, in his rookie season, Magic is classified as a small forward instead of as a shooting guard. (Magic was clearly an outlier in the data, a 205cm point guard who could easily play all five positions. It is even surprising that he is properly labelled as a point guard during most of his career.)
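To put a single number on the 2017 hold-out performance beyond the single-batch check above, a quick sketch (not in the original notebook) reusing the model and splits already defined:
loss, acc = model.evaluate(X_test, y_test, verbose=0)  # returns [loss, accuracy] given the compile metrics
print('2017 hold-out accuracy: {:.3f}'.format(acc))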
|
12,581
|
<ASSISTANT_TASK:>
Python Code:
import requests
import json
import pandas as pd
SERVER = 'http://data.neonscience.org/api/v0/'
SITECODE = 'WOOD'
PRODUCTCODE = 'DP1.00041.001'
#Get availability
site_request = requests.get(SERVER+'sites/'+SITECODE)
site_json = site_request.json()
for product in site_json['data']['dataProducts']:
if(product['dataProductCode'] == PRODUCTCODE):
print(product['availableMonths'])
#Request available files
data_request = requests.get(SERVER+'data/'+PRODUCTCODE+'/'+SITECODE+'/'+'2018-08')
data_json = data_request.json()
for file in data_json['data']['files'][0:20]:
print(file['name'])
print(data_json['data']['files'][7]['name'])
#View names of files that don't contain recorded sensor data
for file in data_json['data']['files']:
if(not ('basic' in file['name'])):
if(not ('expanded' in file['name'])): #Avoid csv files of basic or expanded data
print(file['name'])
#Obtain url of text file and readme
for file in data_json['data']['files']:
if('readme' in file['name']):
readme_url = file['url']
elif('variables' in file['name']):
variable_url = file['url']
readme_req = requests.get(readme_url)
#Print contents of text file
print(readme_req.text)
#Read variables csv into pandas dataframe
df_variables = pd.read_csv(variable_url)
#Filter and show rows for variables in a 1-minute-average table and basic download package
df_variables[(df_variables['table'] == 'ST_1_minute')&(df_variables['downloadPkg'] == 'basic')]
#Check file name and read in file to a data frame
print(data_json['data']['files'][6]['name'])
df_soil_1min = pd.read_csv(data_json['data']['files'][6]['url'])
#Display dimensions:
print('Number of columns: ',df_soil_1min.shape[1])
print('Number of rows: ', df_soil_1min.shape[0])
#Display names and types of columns
df_soil_1min.dtypes
#Print first ten rows of data
df_soil_1min.head(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's get soil temperature data from NEON's Woodworth site. Soil temperature data is measured and recorded automatically by soil temperature probes.
Step2: Seeing as this dataset is collected by automated instruments, it has been available (mostly continuously) since the site was established. Let's get the first 20 data file names available for August 2018.
Step3: Let's break down the name of one of these files.
Step4: The format for naming instrumentation data files, specifically soil temperature measurements, is
Step5: These files include "read me" text files, and files with descriptions of the variables being measured. These provide useful context in interpreting and using the data we download. First we take a quick look at the readme file.
Step6: Next let's look at the 'variables' CSV file listed above. As with observational data products, this contains a table with a row for every variable in the basic and expanded data CSVs, and columns containing various information about each variable.
Step7: Downloading the Instrument Data
Step8: Note that many of the values are "aggregation" sample statistics, like mean, minimum, maximum, etc. This indicates that we aren't getting every single recorded soil temperature; as the table name suggests, we are getting the summary statistics for the data, aggregated over periods of one minute. So the first row includes the mean, minimum, and maximum soil temperature for the first minute recording took place (specified by the start and end date-time variables), the second row includes a summary of values for the second minute recording took place, and so forth. Loading different files from the available data would provide data aggregated over different time intervals.
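Because each row is already a one-minute aggregate, coarser summaries are a one-liner with pandas; a sketch, assuming the startDateTime and soilTempMean column names listed in the variables table above:
df_soil_1min['startDateTime'] = pd.to_datetime(df_soil_1min['startDateTime'])  # assumed column name
hourly_mean = df_soil_1min.set_index('startDateTime')['soilTempMean'].resample('1H').mean()
hourly_mean.head()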
|
12,582
|
<ASSISTANT_TASK:>
Python Code:
REGION = 'us-central1'
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
BUCKET = 'gs://' + PROJECT_ID
!bq --location=US mk census
%%bigquery
CREATE OR REPLACE TABLE census.data AS
SELECT age, workclass, education_num, occupation, hours_per_week,income_bracket,functional_weight
FROM `bigquery-public-data.ml_datasets.census_adult_income`
WHERE AGE IS NOT NULL
AND workclass IS NOT NULL
AND education_num IS NOT NULL
AND occupation IS NOT NULL
AND hours_per_week IS NOT NULL
AND income_bracket IS NOT NULL
AND functional_weight IS NOT NULL
!mkdir scikit_trainer_image
%%writefile ./scikit_trainer_image/train.py
"""Census Scikit classifier trainer script."""
import pickle
import subprocess
import sys
import datetime
import os
import fire
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
def train_evaluate(training_dataset_path, validation_dataset_path,output_dir):
    """Trains the Census Classifier model."""
# Ingest data into Pandas Dataframes
df_train = pd.read_csv(training_dataset_path)
df_validation = pd.read_csv(validation_dataset_path)
df_train = pd.concat([df_train, df_validation])
numeric_features = [
'age', 'education_num','hours_per_week'
]
categorical_features = ['workclass', 'occupation']
# Scale numeric features, one-hot encode categorical features
preprocessor = ColumnTransformer(transformers=[(
'num', StandardScaler(),
numeric_features),
('cat', OneHotEncoder(), categorical_features)])
pipeline = Pipeline([('preprocessor', preprocessor),
('classifier', SGDClassifier(loss='log'))])
num_features_type_map = {feature: 'float64' for feature in numeric_features}
df_train = df_train.astype(num_features_type_map)
df_validation = df_validation.astype(num_features_type_map)
X_train = df_train.drop('income_bracket', axis=1)
y_train = df_train['income_bracket']
# Set parameters of the model and fit
pipeline.set_params(classifier__alpha=0.0005, classifier__max_iter=250)
pipeline.fit(X_train, y_train)
# Save the model locally
model_filename = 'model.pkl'
with open(model_filename, 'wb') as model_file:
pickle.dump(pipeline, model_file)
# Copy to model to GCS
EXPORT_PATH = os.path.join(
output_dir, datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
gcs_model_path = '{}/{}'.format(EXPORT_PATH, model_filename)
subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path])
print('Saved model in: {}'.format(gcs_model_path))
if __name__ == '__main__':
fire.Fire(train_evaluate)
%%writefile ./scikit_trainer_image/Dockerfile
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire scikit-learn==0.23.2 pandas==1.1.1
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
SCIKIT_IMAGE_NAME='scikit_trainer_image'
SCIKIT_IMAGE_TAG='latest'
SCIKIT_IMAGE_URI='gcr.io/{}/{}:{}'.format(PROJECT_ID, SCIKIT_IMAGE_NAME, SCIKIT_IMAGE_TAG)
!gcloud builds submit --tag $SCIKIT_IMAGE_URI $SCIKIT_IMAGE_NAME
!mkdir tensorflow_trainer_image
%%writefile ./tensorflow_trainer_image/train.py
"""Census TensorFlow classifier trainer script."""
import pickle
import subprocess
import sys
import fire
import pandas as pd
import tensorflow as tf
import datetime
import os
CSV_COLUMNS = ["age",
"workclass",
"education_num",
"occupation",
"hours_per_week",
"income_bracket"]
# Add string name for label column
LABEL_COLUMN = "income_bracket"
# Set default values for each CSV column as a list of lists.
# Treat workclass and occupation as strings.
DEFAULTS = [[18], ["?"], [4], ["?"], [20],["<=50K"]]
def features_and_labels(row_data):
cols = tf.io.decode_csv(row_data, record_defaults=DEFAULTS)
feats = {
'age': tf.reshape(cols[0], [1,]),
'workclass': tf.reshape(cols[1],[1,]),
'education_num': tf.reshape(cols[2],[1,]),
'occupation': tf.reshape(cols[3],[1,]),
'hours_per_week': tf.reshape(cols[4],[1,]),
'income_bracket': cols[5]
}
label = feats.pop('income_bracket')
label_int = tf.case([(tf.math.equal(label,tf.constant([' <=50K'])), lambda: 0),
(tf.math.equal(label,tf.constant([' >50K'])), lambda: 1)])
return feats, label_int
def load_dataset(pattern, batch_size=1, mode='eval'):
# Make a CSV dataset
filelist = tf.io.gfile.glob(pattern)
dataset = tf.data.TextLineDataset(filelist).skip(1)
dataset = dataset.map(features_and_labels)
# Shuffle and repeat for training
if mode == 'train':
dataset = dataset.shuffle(buffer_size=10*batch_size).batch(batch_size).repeat()
else:
dataset = dataset.batch(10)
return dataset
def train_evaluate(training_dataset_path, validation_dataset_path, batch_size, num_train_examples, num_evals, output_dir):
inputs = {
'age': tf.keras.layers.Input(name='age',shape=[None],dtype='int32'),
'workclass': tf.keras.layers.Input(name='workclass',shape=[None],dtype='string'),
'education_num': tf.keras.layers.Input(name='education_num',shape=[None],dtype='int32'),
'occupation': tf.keras.layers.Input(name='occupation',shape=[None],dtype='string'),
'hours_per_week': tf.keras.layers.Input(name='hours_per_week',shape=[None],dtype='int32')
}
batch_size = int(batch_size)
num_train_examples = int(num_train_examples)
num_evals = int(num_evals)
feat_cols = {
'age': tf.feature_column.numeric_column('age'),
'workclass': tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_hash_bucket(
key='workclass', hash_bucket_size=100
)
),
'education_num': tf.feature_column.numeric_column('education_num'),
'occupation': tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_hash_bucket(
key='occupation', hash_bucket_size=100
)
),
'hours_per_week': tf.feature_column.numeric_column('hours_per_week')
}
dnn_inputs = tf.keras.layers.DenseFeatures(
feature_columns=feat_cols.values())(inputs)
h1 = tf.keras.layers.Dense(64, activation='relu')(dnn_inputs)
h2 = tf.keras.layers.Dense(128, activation='relu')(h1)
h3 = tf.keras.layers.Dense(64, activation='relu')(h2)
output = tf.keras.layers.Dense(1, activation='sigmoid')(h3)
model = tf.keras.models.Model(inputs=inputs,outputs=output)
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
trainds = load_dataset(
pattern=training_dataset_path,
batch_size=batch_size,
mode='train')
evalds = load_dataset(
pattern=validation_dataset_path,
mode='eval')
steps_per_epoch = num_train_examples // (batch_size * num_evals)
history = model.fit(
trainds,
validation_data=evalds,
validation_steps=100,
epochs=num_evals,
steps_per_epoch=steps_per_epoch
)
EXPORT_PATH = os.path.join(
output_dir, datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
tf.saved_model.save(
obj=model, export_dir=EXPORT_PATH) # with default serving function
print("Exported trained model to {}".format(EXPORT_PATH))
if __name__ == '__main__':
fire.Fire(train_evaluate)
%%writefile ./tensorflow_trainer_image/Dockerfile
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire tensorflow==2.1.1
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
TF_IMAGE_NAME='tensorflow_trainer_image'
TF_IMAGE_TAG='latest'
TF_IMAGE_URI='gcr.io/{}/{}:{}'.format(PROJECT_ID, TF_IMAGE_NAME, TF_IMAGE_TAG)
!gcloud builds submit --tag $TF_IMAGE_URI $TF_IMAGE_NAME
!mkdir pytorch_trainer_image
%%writefile ./pytorch_trainer_image/train.py
import os
import subprocess
import datetime
import fire
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
class TrainData(Dataset):
def __init__(self, X_data, y_data):
self.X_data = X_data
self.y_data = y_data
def __getitem__(self, index):
return self.X_data[index], self.y_data[index]
def __len__ (self):
return len(self.X_data)
class BinaryClassifier(nn.Module):
def __init__(self):
super(BinaryClassifier, self).__init__()
# 27 input features
self.h1 = nn.Linear(27, 64)
self.h2 = nn.Linear(64, 64)
self.output_layer = nn.Linear(64, 1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.1)
self.batchnorm1 = nn.BatchNorm1d(64)
self.batchnorm2 = nn.BatchNorm1d(64)
def forward(self, inputs):
x = self.relu(self.h1(inputs))
x = self.batchnorm1(x)
x = self.relu(self.h2(x))
x = self.batchnorm2(x)
x = self.dropout(x)
x = self.output_layer(x)
return x
def binary_acc(y_pred, y_true):
    """Calculates accuracy."""
y_pred_tag = torch.round(torch.sigmoid(y_pred))
correct_results_sum = (y_pred_tag == y_true).sum().float()
acc = correct_results_sum/y_true.shape[0]
acc = torch.round(acc * 100)
return acc
def train_evaluate(training_dataset_path, validation_dataset_path, batch_size, num_epochs, output_dir):
batch_size = int(batch_size)
num_epochs = int(num_epochs)
# Read in train/validation data and concat
df_train = pd.read_csv(training_dataset_path)
df_validation = pd.read_csv(validation_dataset_path)
df = pd.concat([df_train, df_validation])
categorical_features = ['workclass', 'occupation']
target='income_bracket'
# One-hot encode categorical variables
df = pd.get_dummies(df,columns=categorical_features)
# Change label to 0 if <=50K, 1 if >50K
df[target] = df[target].apply(lambda x: 0 if x==' <=50K' else 1)
# Split features and labels into 2 different vars
X_train = df.loc[:, df.columns != target]
y_train = np.array(df[target])
# Normalize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
# Training data
train_data = TrainData(torch.FloatTensor(X_train),
torch.FloatTensor(y_train))
# Use torch DataLoader to feed data to model
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, drop_last=True)
# Instantiate model
model = BinaryClassifier()
# Loss is binary crossentropy w/ logits. Must manually implement sigmoid for inference
criterion = nn.BCEWithLogitsLoss()
# Adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
model.train()
for e in range(1, num_epochs+1):
epoch_loss = 0
epoch_acc = 0
for X_batch, y_batch in train_loader:
optimizer.zero_grad()
y_pred = model(X_batch)
loss = criterion(y_pred, y_batch.unsqueeze(1))
acc = binary_acc(y_pred, y_batch.unsqueeze(1))
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
print(f'Epoch {e}: Loss = {epoch_loss/len(train_loader):.5f} | Acc = {epoch_acc/len(train_loader):.3f}')
# Save the model locally
model_filename='model.pt'
torch.save(model.state_dict(), model_filename)
EXPORT_PATH = os.path.join(
output_dir, datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
# Copy the model to GCS
gcs_model_path = '{}/{}'.format(EXPORT_PATH, model_filename)
subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path])
print('Saved model in: {}'.format(gcs_model_path))
if __name__ == '__main__':
fire.Fire(train_evaluate)
%%writefile ./pytorch_trainer_image/Dockerfile
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire torch==1.6.0 scikit-learn==0.23.2 pandas==1.1.1
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
TORCH_IMAGE_NAME='pytorch_trainer_image'
TORCH_IMAGE_TAG='latest'
TORCH_IMAGE_URI='gcr.io/{}/{}:{}'.format(PROJECT_ID, TORCH_IMAGE_NAME, TORCH_IMAGE_TAG)
!gcloud builds submit --tag $TORCH_IMAGE_URI $TORCH_IMAGE_NAME
!mkdir xgboost_trainer_image
%%writefile ./xgboost_trainer_image/train.py
import os
import subprocess
import datetime
import fire
import pickle
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
def train_evaluate(training_dataset_path, validation_dataset_path,max_depth,n_estimators,output_dir):
df_train = pd.read_csv(training_dataset_path)
df_validation = pd.read_csv(validation_dataset_path)
df = pd.concat([df_train, df_validation])
categorical_features = ['workclass', 'occupation']
target='income_bracket'
# One-hot encode categorical variables
df = pd.get_dummies(df,columns=categorical_features)
# Change label to 0 if <=50K, 1 if >50K
df[target] = df[target].apply(lambda x: 0 if x==' <=50K' else 1)
# Split features and labels into 2 different vars
X_train = df.loc[:, df.columns != target]
y_train = np.array(df[target])
# Normalize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
grid = {
'max_depth': int(max_depth),
'n_estimators': int(n_estimators)
}
model = XGBClassifier()
model.set_params(**grid)
model.fit(X_train,y_train)
model_filename = 'xgb_model.pkl'
pickle.dump(model, open(model_filename, "wb"))
EXPORT_PATH = os.path.join(
output_dir, datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
gcs_model_path = '{}/{}'.format(EXPORT_PATH, model_filename)
subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path])
print('Saved model in: {}'.format(gcs_model_path))
if __name__ == '__main__':
fire.Fire(train_evaluate)
%%writefile ./xgboost_trainer_image/Dockerfile
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire scikit-learn==0.23.2 pandas==1.1.1 xgboost==1.2.0
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
XGB_IMAGE_NAME='xgboost_trainer_image'
XGB_IMAGE_TAG='latest'
XGB_IMAGE_URI='gcr.io/{}/{}:{}'.format(PROJECT_ID, XGB_IMAGE_NAME, XGB_IMAGE_TAG)
!gcloud builds submit --tag $XGB_IMAGE_URI $XGB_IMAGE_NAME
!mkdir pipeline
%%writefile ./pipeline/census_training_pipeline.py
import os
import kfp
from kfp.dsl.types import GCPProjectID
from kfp.dsl.types import GCPRegion
from kfp.dsl.types import GCSPath
from kfp.dsl.types import String
from kfp.gcp import use_gcp_secret
import kfp.components as comp
import kfp.dsl as dsl
import kfp.gcp as gcp
import json
# We will use environment vars to set the trainer image names and bucket name
TF_TRAINER_IMAGE = os.getenv('TF_TRAINER_IMAGE')
SCIKIT_TRAINER_IMAGE = os.getenv('SCIKIT_TRAINER_IMAGE')
TORCH_TRAINER_IMAGE = os.getenv('TORCH_TRAINER_IMAGE')
XGB_TRAINER_IMAGE = os.getenv('XGB_TRAINER_IMAGE')
BUCKET = os.getenv('BUCKET')
# Paths to export the training/validation data from bigquery
TRAINING_OUTPUT_PATH = BUCKET + '/census/data/training.csv'
VALIDATION_OUTPUT_PATH = BUCKET + '/census/data/validation.csv'
COMPONENT_URL_SEARCH_PREFIX = 'https://raw.githubusercontent.com/kubeflow/pipelines/0.2.5/components/gcp/'
# Create component factories
component_store = kfp.components.ComponentStore(
local_search_paths=None, url_search_prefixes=[COMPONENT_URL_SEARCH_PREFIX])
# Load BigQuery and AI Platform Training op
bigquery_query_op = component_store.load_component('bigquery/query')
mlengine_train_op = component_store.load_component('ml_engine/train')
def get_query(dataset='training'):
    """Function that returns either the training or the validation query."""
if dataset=='training':
split = "MOD(ABS(FARM_FINGERPRINT(CAST(functional_weight AS STRING))), 100) < 80"
    elif dataset=='validation':
        split = """MOD(ABS(FARM_FINGERPRINT(CAST(functional_weight AS STRING))), 100) >= 80
                   AND MOD(ABS(FARM_FINGERPRINT(CAST(functional_weight AS STRING))), 100) < 90"""
else:
split = "MOD(ABS(FARM_FINGERPRINT(CAST(functional_weight AS STRING))), 100) >= 90"
    query = """SELECT age, workclass, education_num, occupation, hours_per_week,income_bracket
               FROM census.data
               WHERE {0}""".format(split)
return query
# We will use the training/validation queries as inputs to our pipeline
# This lets us change the training/validation datasets if we wish by simply
# Changing the query.
TRAIN_QUERY = get_query(dataset='training')
VALIDATION_QUERY=get_query(dataset='validation')
@dsl.pipeline(
name='Continuous Training with Multiple Frameworks',
description='Pipeline to create training/validation splits w/ BigQuery then launch multiple AI Platform Training Jobs'
)
def pipeline(
project_id,
train_query=TRAIN_QUERY,
validation_query=VALIDATION_QUERY,
region='us-central1'
):
# Creating the training data split
create_training_split = bigquery_query_op(
query=train_query,
project_id=project_id,
output_gcs_path=TRAINING_OUTPUT_PATH
).set_display_name('BQ Train Split')
# Creating the validation data split
create_validation_split = bigquery_query_op(
query=validation_query,
project_id=project_id,
output_gcs_path=VALIDATION_OUTPUT_PATH
).set_display_name('BQ Eval Split')
# These are the output directories where our models will be saved
tf_output_dir = BUCKET + '/census/models/tf'
scikit_output_dir = BUCKET + '/census/models/scikit'
torch_output_dir = BUCKET + '/census/models/torch'
xgb_output_dir = BUCKET + '/census/models/xgb'
# Training arguments to be passed to the TF Trainer
tf_args = [
'--training_dataset_path', create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path', create_validation_split.outputs['output_gcs_path'],
'--output_dir', tf_output_dir,
'--batch_size', '32',
'--num_train_examples', '1000',
'--num_evals', '10'
]
# Training arguments to be passed to the Scikit-learn Trainer
scikit_args = [
'--training_dataset_path', create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path', create_validation_split.outputs['output_gcs_path'],
'--output_dir', scikit_output_dir
]
# Training arguments to be passed to the PyTorch Trainer
torch_args = [
'--training_dataset_path', create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path', create_validation_split.outputs['output_gcs_path'],
'--output_dir', torch_output_dir,
'--batch_size', '32',
'--num_epochs', '15',
]
# Training arguments to be passed to the XGBoost Trainer
xgb_args = [
'--training_dataset_path', create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path', create_validation_split.outputs['output_gcs_path'],
'--output_dir', xgb_output_dir,
'--max_depth', '10',
'--n_estimators', '100'
]
# AI Platform Training Jobs with all 4 trainer images
train_scikit = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=SCIKIT_TRAINER_IMAGE,
args=scikit_args).set_display_name('Scikit Model - AI Platform Training')
train_tf = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TF_TRAINER_IMAGE,
args=tf_args).set_display_name('Tensorflow Model - AI Platform Training')
train_torch = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TORCH_TRAINER_IMAGE,
args=torch_args).set_display_name('Pytorch Model - AI Platform Training')
train_xgb = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=XGB_TRAINER_IMAGE,
args=xgb_args).set_display_name('XGBoost Model - AI Platform Training')
TAG = 'latest'
SCIKIT_TRAINER_IMAGE = 'gcr.io/{}/scikit_trainer_image:{}'.format(PROJECT_ID, TAG)
TF_TRAINER_IMAGE = 'gcr.io/{}/tensorflow_trainer_image:{}'.format(PROJECT_ID, TAG)
TORCH_TRAINER_IMAGE = 'gcr.io/{}/pytorch_trainer_image:{}'.format(PROJECT_ID, TAG)
XGB_TRAINER_IMAGE = 'gcr.io/{}/xgboost_trainer_image:{}'.format(PROJECT_ID, TAG)
%env TF_TRAINER_IMAGE={TF_TRAINER_IMAGE}
%env SCIKIT_TRAINER_IMAGE={SCIKIT_TRAINER_IMAGE}
%env TORCH_TRAINER_IMAGE={TORCH_TRAINER_IMAGE}
%env XGB_TRAINER_IMAGE={XGB_TRAINER_IMAGE}
%env BUCKET={BUCKET}
!dsl-compile --py pipeline/census_training_pipeline.py --output census_training_pipeline.yaml
!head census_training_pipeline.yaml
#TODO: Change ENDPOINT to the ENDPOINT for your AI Platform Pipelines Instance
ENDPOINT = ''
PIPELINE_NAME = 'census_trainer_multiple_models'
!kfp --endpoint $ENDPOINT pipeline upload \
-p $PIPELINE_NAME \
./census_training_pipeline.yaml
print(f"https://{ENDPOINT}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, create a BigQuery dataset. We will then query a public BigQuery dataset to populate a table in this dataset. This is census data. We will use age, workclass, education, occupation, and hours per week to predict income bracket. Note
Step4: Create Scikit-learn Training Script
Step5: Package Scikit-learn training script into a Docker image
Step6: Build the Scikit-learn trainer image
Step8: Create TensorFlow Training Script
Step9: Package TensorFlow Training Script into a Docker Image
Step10: Build the Tensorflow Trainer Image
Step12: Create PyTorch Training Script
Step13: Package PyTorch Training Script into a Docker Image
Step14: Build the PyTorch Trainer Image
Step15: Create XGBoost Training Script
Step16: Package XGBoost Training Script into a Docker Image
Step17: Build the XGBoost Trainer Image
Step21: Develop KubeFlow Pipeline
Step22: Set environment variables for the different trainer image names as well as our bucket.
Step23: Compile the Pipeline
Step24: Take a look at the head of the yaml file
Step25: Deploy your KubeFlow Pipeline
Step26: Continuous Training
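One way to launch runs of the uploaded pipeline programmatically is via the KFP SDK client; a sketch, with the experiment and job names purely illustrative:
import kfp
client = kfp.Client(host=ENDPOINT)
experiment = client.create_experiment('census-continuous-training')  # illustrative name
run = client.run_pipeline(experiment.id, 'census-training-run',
                          'census_training_pipeline.yaml',
                          params={'project_id': PROJECT_ID})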
|
12,583
|
<ASSISTANT_TASK:>
Python Code:
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import urllib.request as urllib2  # Python 3: urllib2 is now urllib.request
def check_condition(row):
if row[-1] == 0:
return False
return True
url = ('https://raw.githubusercontent.com/Upward-Spiral-Science'
'/data/master/syn-density/output.csv')
data = urllib2.urlopen(url)
csv = np.genfromtxt(data, delimiter=",")
# only look at data points with nonzero synapse value
a = np.apply_along_axis(check_condition, 1, csv)
a = np.where(a == True)[0]
nonzero_rows = csv[a, :]
nonzero_rows = nonzero_rows[1:, :]
# Total number of synapses
print(np.sum(nonzero_rows[:,4]))
# Max number of synapses
max_syn = np.argmax(nonzero_rows[:,4])
print(max_syn)
loc = (nonzero_rows[max_syn,0],nonzero_rows[max_syn,1],nonzero_rows[max_syn,2]);
print(loc)
print([min(csv[1:,1]),min(csv[1:,2]),min(csv[1:,3])]) #(x,y,z) minimum
print([max(csv[1:,1]),max(csv[1:,2]),max(csv[1:,3])]) #(x,y,z) maximum
# Histogram
fig = plt.figure()
ax = fig.gca()
plt.hist(nonzero_rows[:,4])
ax.set_title('Synapse Density')
ax.set_xlabel('Number of Synapses')
ax.set_ylabel('Number of (x,y,z) points with synapse density = x')
plt.show()
# PMF
syns = csv[1:,4]
total = np.sum(syns)  # avoid shadowing the built-in sum()
density = syns/total
mean = np.mean(density)
std = np.std(density)
print(std, mean)
#for locating synapse values of zero
def check_condition(row):
if row[-1] == 0:
return False
return True
#for filtering by the mean number of synapses
def synapse_filt(row, avg):
if row[-1] > avg:
return True
return False
samples = 5000
# only look at data points where the number of synapses is greater than avg
a = np.apply_along_axis(check_condition, 1, csv)
a = np.where(a == True)[0]
nonzero_rows = csv[a, :]
avg_synapse = np.mean(nonzero_rows[1:, -1])
print(avg_synapse)
filter_avg_synapse = np.apply_along_axis(synapse_filt, 1,
nonzero_rows, avg_synapse)
a = np.where(filter_avg_synapse == True)[0]
nonzero_filtered = nonzero_rows[a, :]
xyz_only = nonzero_filtered[:, [1, 2, 3]]
#randomly sample from the remaining data points
perm = np.random.permutation(xrange(1, len(xyz_only[:])))
xyz_only = xyz_only[perm[:samples]]
# get range for graphing
x_min = np.amin(xyz_only[:, 0])
x_max = np.amax(xyz_only[:, 0])
y_max = np.amax(xyz_only[:, 1])
y_min = np.amin(xyz_only[:, 1])
z_min = np.amin(xyz_only[:, 2])
z_max = np.amax(xyz_only[:, 2])
# following code adopted from
# https://www.getdatajoy.com/examples/python-plots/3d-scatter-plot
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_title('3D Scatter Plot')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_zlim(z_min, z_max)
ax.view_init()
ax.dist = 12 # distance
ax.scatter(
xyz_only[:, 0], xyz_only[:, 1], xyz_only[:, 2], # data
color='purple', # marker colour
marker='o', # marker shape
s=30 # marker size
)
plt.show() # render the plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What is the total number of synapses in our data set?
Step2: What is the maximum number of synapses at a given point in our data set?
Step3: What are the minimum and maximum x, y, and z values (and thus, the bounding box of (x,y,z) points for our data set)?
Step4: What does the histogram of our data look like?
Step5: What does the probability mass function of our data look like?
Step6: What does our data look like in a 3-D scatter plot?
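A natural follow-up is to normalize synapse counts by the unmasked voxel count in each bin; a sketch, assuming (as the column layout above suggests) that column 3 of the CSV holds the unmasked count:
unmasked = nonzero_rows[:, 3]          # assumed to be the unmasked voxel count
syn_density = nonzero_rows[:, 4] / unmasked
plt.hist(syn_density, bins=50)
plt.show()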
|
12,584
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
ad_data = pd.read_csv('advertising.csv')
ad_data.head()
ad_data.info()
ad_data.describe()
sns.distplot(ad_data['Age'],kde=False,bins=30,color='blue')
sns.jointplot(data=ad_data,x='Age',y='Area Income')
sns.jointplot(data=ad_data,x='Age',y='Daily Time Spent on Site',kind='kde')
sns.jointplot(data=ad_data,x='Daily Time Spent on Site',y='Daily Internet Usage',color='green')
sns.pairplot(ad_data,hue='Clicked on Ad')
ad_data.columns
from sklearn.model_selection import train_test_split
X = ad_data[['Daily Time Spent on Site', 'Age', 'Area Income','Daily Internet Usage','Male']]
y = ad_data['Clicked on Ad']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=101)
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
logmodel.coef_
predictions = logmodel.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
from collections import OrderedDict
d = OrderedDict({'Daily Time Spent on Site': 500, 'Age': 18, 'Area Income':23000,'Daily Internet Usage': 160,'Male': 1})
df = pd.DataFrame(d,index=[0])
sample_predict = logmodel.predict(df)
print(sample_predict)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the Data
Step2: Check the head of ad_data
Step3: Use info and describe() on ad_data
Step4: Exploratory Data Analysis
Step5: Create a jointplot showing Area Income versus Age.
Step6: Create a jointplot showing the kde distributions of Daily Time spent on site vs. Age.
Step7: Create a jointplot of 'Daily Time Spent on Site' vs. 'Daily Internet Usage'
Step8: Finally, create a pairplot with the hue defined by the 'Clicked on Ad' column feature.
Step9: Logistic Regression
Step10: Train and fit a logistic regression model on the training set.
Step11: Predictions and Evaluations
Step12: Create a classification report for the model.
Step13: Great Job!
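As a supplement to the classification report, the raw error counts are easy to inspect with a confusion matrix, reusing the objects defined above:
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, predictions))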
|
12,585
|
<ASSISTANT_TASK:>
Python Code:
# Load the needed packages
import os
import matplotlib.pyplot as plt
import numpy as np
from netCDF4 import Dataset
import awot
from awot.graph import FlightLevel, RadarVerticalPlot, MicrophysicalVerticalPlot
%matplotlib inline
file1 = "WCR.OWLES13.20131215.225944_234806.up-down.nc"
#file = "WCR.TEST14.20140618.200302_201559.up.nc"
wcrf1 = os.path.join("/Users/guy/data/king_air/owles2013/wcr", file1)
file2 = "W-CORRECTED.WCR.OWLES13.20131215.225944_234806.up-down.nc"
wcrf2 = os.path.join("/Users/guy/data/king_air/owles2013/wcr/", file2)
start_time = "2013-12-16 01:25:00"
end_time = "2013-12-16 01:30:00"
# Set the project name
Project="OWLeS"
#OWLES
altmin, altmax = 0., 3000.
refmin, refmax = -50., 30.
#velmin, velmax = -16., 16. # Nyquist
velmin, velmax = -3., 3. # Nyquist
wcr = awot.io.read_wcr2(fname=wcrf1)
nc = Dataset(wcrf2)
velcor = nc.variables['Velocity_cor_2']
awot.util.add_dict_to_awot_fields(wcr, 'velocity_corrected', data=velcor[:],
units=velcor.units, longname=velcor.long_name, stdname="Corrected velocity")
print(wcr['fields']['velocity']['data'].shape, wcr['fields']['velocity_corrected']['data'].shape)
print(np.ma.min(wcr['fields']['velocity_corrected']['data']), np.ma.max(wcr['fields']['velocity_corrected']['data']))
print(wcr.keys())
print("Reflectivity Min/Max = %f / %f \n"
"Velocity Min/Max = %f / %f \n"
"Altitude Min/Max = %f / %f \n"
"Height Min/Max = %f / %f \n"
"Surface Min/Max = %f / %f \n"%(
wcr['fields']['reflectivity']['data'].min(),wcr['fields']['reflectivity']['data'].max(),
wcr['fields']['velocity']['data'].min(),wcr['fields']['velocity']['data'].max(),
wcr['altitude']['data'].min(), wcr['altitude']['data'].max(),
wcr['height']['data'].min(), wcr['height']['data'].max(),
wcr['surface']['data'].min(), wcr['surface']['data'].max()))
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(7.5,10), sharex=True)
wcr_vp= RadarVerticalPlot(wcr)
wcr_vp.time_height_image('reflectivity', ax=ax1, plot_log10_var=False,
# start_time=start_time, end_time=end_time,
vmin=refmin, vmax=refmax,
fill_surface=True,
cb_label=r'Reflectivity (dBZ)',
height_min=altmin, height_max=altmax, title=file1,
ylab=r'Altitude (m)', ylabFontSize=12)
wcr_vp.time_height_image('velocity', ax=ax2, plot_log10_var=False,
# start_time=start_time, end_time=end_time,
vmin=velmin, vmax=velmax,
fill_surface=True,
cmap="PuOr_r",
cb_label=r'Doppler Velocity (m s$^{-1}$)',
height_min=altmin, height_max=altmax,
ylab=r'Altitude (m)', ylabFontSize=12,
xlab='UTC Time', xlabFontSize=12)
wcr_vp.time_height_image('velocity_corrected', ax=ax3, plot_log10_var=False,
# start_time=start_time, end_time=end_time,
vmin=velmin, vmax=velmax,
fill_surface=True,
cmap="PuOr_r",
cb_label=r'Doppler Velocity (m s$^{-1}$)',
height_min=altmin, height_max=altmax,
ylab=r'Altitude (m)', ylabFontSize=12,
xlab='UTC Time', xlabFontSize=12)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <b>Supply input data and plotting characteristics. In this case we'll use a file from the OWLeS project and a velocity field corrected using sounding data.</b>
Step2: <b>Read in the radar data</b>
Step3: <b>Read a variable from another file and add it to the AWOT dictionary. A mask of invalid data is automatically applied. Additional masking can be accomplished by setting the <i>mask_value</i> keyword.</b>
Step4: <b>Make a vertical time series plot of reflectivity and velocity fields</b>
|
12,586
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
%matplotlib inline
path = "data/dogscats/"
#path = "data/dogscats/sample/"
import os, json
from glob import glob
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
# check that ~/.keras/keras.json is set for Theano and includes "image_data_format": "channels_first"
from importlib import reload # Python 3
import utils; reload(utils)
from utils import plots
# As large as you can, but no larger than 64 is recommended.
# If you have an older or cheaper GPU, you'll run out of memory, so will have to decrease this.
#batch_size = 1
#batch_size = 4
batch_size = 64
# Import our class, and instantiate
import vgg16; reload(vgg16)
from vgg16 import Vgg16
vgg = Vgg16()
# Grab a few images at a time for training and validation.
# NB: They must be in subdirectories named based on their category
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size*2)
vgg.finetune(batches)
vgg.fit(batches, val_batches, batch_size, nb_epoch=1)
vgg = Vgg16()
batches = vgg.get_batches(path+'train', batch_size=4)
imgs,labels = next(batches)
plots(imgs, titles=labels)
vgg.predict(imgs, True)
vgg.classes[:4]
#batch_size = 1
#batch_size = 4
batch_size = 64
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, batch_size, nb_epoch=1)
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
FILES_PATH = 'http://www.platform.ai/models/'; CLASS_FILE='imagenet_class_index.json'
# Keras' get_file() is a handy function that downloads files, and caches them for re-use later
fpath = get_file(CLASS_FILE, FILES_PATH+CLASS_FILE, cache_subdir='models')
with open(fpath) as f: class_dict = json.load(f)
# Convert dictionary with string indexes into an array
classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
classes[:5]
def ConvBlock(layers, model, filters):
for i in range(layers):
model.add(ZeroPadding2D((1,1)))
model.add(Conv2D(filters, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
def FCBlock(model):
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
# Mean of each channel as provided by VGG researchers
vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3,1,1))
def vgg_preprocess(x):
x = x - vgg_mean # subtract mean
return x[:, ::-1] # reverse axis bgr->rgb
def VGG_16():
model = Sequential()
model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))
ConvBlock(2, model, 64)
ConvBlock(2, model, 128)
ConvBlock(3, model, 256)
ConvBlock(3, model, 512)
ConvBlock(3, model, 512)
model.add(Flatten())
FCBlock(model)
FCBlock(model)
model.add(Dense(1000, activation='softmax'))
return model
model = VGG_16()
fpath = get_file('vgg16.h5', FILES_PATH+'vgg16.h5', cache_subdir='models')
model.load_weights(fpath)
batch_size = 4
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True,
batch_size=batch_size, class_mode='categorical'):
return gen.flow_from_directory(path+dirname, target_size=(224,224),
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
batches = get_batches('train', batch_size=batch_size)
val_batches = get_batches('valid', batch_size=batch_size)
imgs,labels = next(batches)
# This shows the 'ground truth'
plots(imgs, titles=labels)
def pred_batch(imgs):
preds = model.predict(imgs)
idxs = np.argmax(preds, axis=1)
print('Shape: {}'.format(preds.shape))
print('First 5 classes: {}'.format(classes[:5]))
print('First 5 probabilities: {}\n'.format(preds[0, :5]))
print('Predictions prob/class: ')
for i in range(len(idxs)):
idx = idxs[i]
print (' {:.4f}/{}'.format(preds[i, idx], classes[idx]))
pred_batch(imgs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define path to data
Step2: A few basic libraries that we'll need for the initial exercises
Step3: We have created a file most imaginatively called 'utils.py' to store any little convenience functions we'll want to use. We will discuss these as we use them.
Step4: Use a pretrained VGG model with our Vgg16 class
Step5: The code above will work for any image recognition task, with any number of categories! All you have to do is put your images into one folder per category, and run the code above.
Step6: Vgg16 is built on top of Keras (which we will be learning much more about shortly!), a flexible, easy to use deep learning library that sits on top of Theano or Tensorflow. Keras reads groups of images and labels in batches, using a fixed directory structure, where images from each category for training must be placed in a separate folder.
Step7: (BTW, when Keras refers to 'classes', it doesn't mean python classes - but rather it refers to the categories of the labels, such as 'pug', or 'tabby'.)
Step8: As you can see, the labels for each image are an array, containing a 1 in the first position if it's a cat, and in the second position if it's a dog. This approach to encoding categorical variables, where an array contains just a single 1 in the position corresponding to the category, is very common in deep learning. It is called one-hot encoding.
Step9: We can now pass the images to Vgg16's predict() function to get back probabilities, category indexes, and category names for each image's VGG prediction.
Step10: The category indexes are based on the ordering of categories used in the VGG model - e.g here are the first four
Step11: (Note that, other than creating the Vgg16 object, none of these steps are necessary to build a model; they are just showing how to use the class to view imagenet predictions.)
Step12: Calling finetune() modifies the model such that it will be trained based on the data in the batches provided - in this case, to predict either 'dog' or 'cat'.
Step13: Finally, we fit() the parameters of the model using the training data, reporting the accuracy on the validation set after every epoch. (An epoch is one full pass through the training data.)
Step14: That shows all of the steps involved in using the Vgg16 class to create an image recognition model using whatever labels you are interested in. For instance, this process could classify paintings by style, or leaves by type of disease, or satellite photos by type of crop, and so forth.
Step15: Let's import the mappings from VGG ids to imagenet category ids and descriptions, for display purposes later.
Step16: Here's a few examples of the categories we just imported
Step17: Model creation
Step18: ...and here's the fully-connected definition.
Step19: When the VGG model was trained in 2014, the creators subtracted the average of each of the three (R,G,B) channels first, so that the data for each channel had a mean of zero. Furthermore, their software expected the channels to be in B,G,R order, whereas Python by default uses R,G,B. We need to preprocess our data to make these two changes, so that it is compatible with the VGG model.
Step20: Now we're ready to define the VGG model architecture - look at how simple it is, now that we have the basic blocks defined!
Step21: We'll learn about what these different blocks do later in the course. For now, it's enough to know that
Step22: As well as the architecture, we need the weights that the VGG creators trained. The weights are the part of the model that is learnt from the data, whereas the architecture is pre-defined based on the nature of the problem.
Step23: Getting imagenet predictions
Step24: Keras provides functionality to create batches of data from directories containing images; all we have to do is to define the size to resize the images to, what type of labels to create, whether to randomly shuffle the images, and how many images to include in each batch. We use this little wrapper to define some helpful defaults appropriate for imagenet data
Step25: From here we can use exactly the same steps as before to look at predictions from the model.
Step26: The VGG model returns 1,000 probabilities for each image, representing the probability that the model assigns to each possible imagenet category for each image. By finding the index with the largest probability (with np.argmax()) we can find the predicted label.
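To see more than the single best guess per image, the same preds array can be sliced for a top-5 view, reusing only objects defined in the code above:
preds = model.predict(imgs)
top5 = np.argsort(preds[0])[::-1][:5]            # indices of the five highest probabilities
for idx in top5:
    print('{:.4f}  {}'.format(preds[0, idx], classes[idx]))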
|
12,587
|
<ASSISTANT_TASK:>
Python Code:
import vcsn
c = vcsn.context('lat<lan_char(abc), lan_char(bce)>, nmin')
l = c.levenshtein()
l
a1 = vcsn.context('lan_char(abc), b').expression("bac+cab").derived_term().strip().partial_identity()
a1
a2 = vcsn.context('lan_char(bce), b').expression("bec+bebe").automaton().cominimize().strip().partial_identity()
a2
edit = a1.compose(l).compose(a2).trim()
edit
exp = edit.lift(1).proper().eval("bac").format('text')
vcsn.context("lan_char(bce), nmin").series(exp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Levenshtein automaton only has one state, but has $N*M + N + M$ transitions, for alphabets of size $N$ and $M$.
Step2: The automaton can be evaluated on one tape to get the edit distance between a word and a language.
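For instance, evaluating a different word follows exactly the same pattern as the last lines of the code above (reusing the composed transducer edit):
exp = edit.lift(1).proper().eval("cab").format('text')
vcsn.context("lan_char(bce), nmin").series(exp)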
|
12,588
|
<ASSISTANT_TASK:>
Python Code:
fruits = ['Apple', 'Mango', 'Grapes', 'Jackfruit',
'Apple', 'Banana', 'Grapes', [1, "Orange"]]
# processing the entire list
for fruit in fruits:
print(fruit, end=", ")
#
print("*"*30)
fruits.insert(0, "kiwi")
print( fruits)
# help(fruits.insert)
# Including
ft1 = list(fruits)
print(id(ft1))
print(id(fruits))
ft1 = fruits[:]
print(id(ft1))
print(id(fruits))
ft2 = fruits
print(id(ft2))
print(id(fruits))
fruits.append('Camel')
print(fruits)
fruits.append(['kiwi', 'Apple', 'Camel'])
print(fruits)
fruits.extend(['kiwi', 'Apple', 'Camel'])
print(fruits)
fruits.extend(['kiwi', ['Apple', 'Camel']])
print(fruits)
## Removing the second instance of Grapes
## (note: mutating a list while iterating over it is fragile in general;
## it works here only because the list contains exactly two 'Grapes')
x = 0
y = 0
for fruit in fruits:
if x == 1 and fruit == 'Grapes':
# del (fruits[y])
fruits.pop(y)
elif fruit == 'Grapes':
x = 1
y +=1
print(fruits)
fruits.remove('Grapes')
print(fruits)
fruits.append("Grapes")
# These will work on only homogeneous list and will fail for heterogeneous
fruits.sort()
print(fruits)
fruits.reverse()
print(fruits)
# # # prints with number order
fruits = ['Apple', 'Mango', 'Grapes', 'Jackfruit',
'Apple', 'Banana', 'Grapes']
for i, prog in enumerate(fruits):
print( i + 1, '=>', prog)
my_list = ['A', 'B', 'C']
for a, b in enumerate(my_list):
print(a, b)
my_list = ['A', 'B', 'C']
print ('list:', my_list)
# # The empty list is evaluated as false
while my_list:
# In queues, the first item is the first to go out
# pop(0) removes and returns the first item
print ('Left', my_list.pop(0), ', remain', len(my_list), my_list)
my_list.append("G")
# # More items on the list
my_list += ['D', 'E', 'F']
print ('list:', my_list)
while my_list:
# On stacks, the first item is the last to go out
# pop() removes and returns the last item
print ('Left', my_list.pop(), ', remain', len(my_list), my_list)
l = ['D', 'E', 'F', "G", "H"]
print(l)
k = ('D', "E", "G", "H")
print(dir(l))
print("*"*8)
print(dir(k))
t = ([1, 2], 4)
print(t)
print(" :: Error :: ")
t[0] = 3
print(t)
t[0] = [1, 2, 3]
print(t)
t[0].append(3)
print(t)
t[0][0] = [1, 2, 3]
print(t)
ta = (1, 2, 3, 4, 5)
for a in ta:
print (a)
ta1 = [1, 2, 3, 4, 5]
for a in ta1:
print(a)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NOTE
Step2: Removing
Step3: Appending
Step4: Ordering
Step5: Inverting
Step6: The function enumerate() returns a tuple of two elements in each iteration
Step7: The sort (sort) and reversal (reverse) operations are performed in place on the list and do not create new lists (a short sketch contrasting them with sorted() follows this list).
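A short sketch (Python 3 syntax) contrasting the in-place list.sort() with the built-in sorted(), which returns a brand new list:
nums = [3, 1, 2]
new_nums = sorted(nums)   # sorted() builds a new sorted list
print(nums, new_nums)     # [3, 1, 2] [1, 2, 3]
result = nums.sort()      # sort() mutates in place and returns None
print(nums, result)       # [1, 2, 3] None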
|
12,589
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.linspace(0,10,11)
# your code here
#start by defining the length of the array
arrayLength = 10
#let's set the array to currently be an array of 0s
myArray = np.zeros(arrayLength) #make a numpy array of 10 zeros
# Let's define the first element of the array
myArray[0] = 1
i = 1 #with the first element defined, we can calculate the rest of the sequence beginning with the 2nd element
while i < arrayLength:
myArray[i] = myArray[i-1]+2
i = i + 1
print(myArray)
import numpy as np
np.random.rand(5)
np.random.rand(5,5)
np.random.randn(5)
import matplotlib.pyplot as plt
%matplotlib inline
#your code here
X = np.random.rand(5000)
plt.hist(X, bins=20)
#your code here
X = np.random.randn(5000)
plt.hist(X, bins=50)
mu = 5 #the mean of the distribution
sigma = 3 #the standard deviation
X = sigma * np.random.randn(5000) + mu
plt.hist(X,bins=50)
#write your observations here
N,bins,patches = plt.hist(X, bins=50)
bin_avg = (bins[1:]+bins[:-1])/2
plt.plot(bin_avg, N, 'r*')
plt.show()
mean = np.mean(X)
std = np.std(X)
print('mean: '+ repr(mean) )
print('standard deviation: ' + repr(std))
lifetimes = np.loadtxt('Data/LifetimeData.txt')
#your code here
N,bins,patches = plt.hist(lifetimes,bins=40)
#your code here
mean = np.mean(lifetimes)
std = np.std(lifetimes)
print("mean: "+repr(mean))
print("standard deviation: "+repr(std))
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
%matplotlib inline
# your code here
x_vals = np.linspace(-5,5,100)
# your code here
def f_scalar(x):
return 1/3*x**2 + 3
y = f_scalar(x_vals)
# your code here
y_noisy = y + np.random.rand(100)
plt.plot(x_vals,y_noisy)
def quadratic(x,a,b,c):
return a*x**2 + b*x + c
optimal_values, _ = curve_fit(quadratic,x_vals,y_noisy)
a = optimal_values[0]
b = optimal_values[1]
c = optimal_values[2]
print(a, b, c)
y_fitted = quadratic(x_vals,a,b,c)
plt.plot(x_vals,y_fitted)
plt.plot(x_vals,y_noisy)
plt.plot(x_vals,y_fitted)
plt.plot(x_vals,y)
# let's get you started by importing the right libraries
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
%matplotlib inline
# Step 1: Import the data
xData, yData = np.loadtxt('Data/photopeak.txt', usecols=(0,1), unpack=True)
print(xData,yData)
# Step 2: Plot the data to see what it looks like
plt.plot(xData,yData,'*')
from IPython.display import display, Image
display(Image(filename='Data/photopeak.png'))
# Step 3: Define your function here
def myGaussian(Xvals,A,mu,sigma):
return (A/np.sqrt(2*np.pi*sigma**2))*np.exp(-((Xvals-mu)**2/(2*sigma**2)))
# Step 3.5: SANITY CHECK! Use this step as a way to check that the function you defined above is mathematically correct.
mu = 0.66 #the mean of the distribution
sigma = 0.04 #the standard deviation
A = 10;
Xvals = np.linspace(0.50,0.80,100)
Yvals = myGaussian(Xvals,A,mu,sigma)  # the amplitude A is already applied inside myGaussian
plt.plot(Xvals,Yvals)
# Step 4: Use curve_fit to generate your output parameters
popt, pcov = curve_fit(myGaussian, xData, yData, p0=[0.5, 0.1, 5])
#perr = np.sqrt(np.diag(pcov))
# Step 5: Generate your model fit
xFit = np.linspace(min(xData),max(xData),100) #give this
line_fit = myGaussian(xFit, *popt)
# Step 6: Plot the best fit function and the scatter plot of data
plt.plot(xData, yData, 'r*')
plt.plot(xFit, line_fit)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can do a lot with the numpy module. Below is an example to jog your memory
Step2: Do you remember loops? Let's use a while loop to make an array of 10 numbers. Let's have each element be increased by 2 compared with the previous element. Let's also have the first element of the array be 1.
Step3: There's your quick review of numpy and a while loop. Now we can move on to the content of Lecture 3.
Step4: Let's generate a numpy array of length 5 populated with uniformly distributed random numbers. The function np.random.rand takes the array output size as an argument (in this case, 5).
Step5: Additionally, you are not limited to one-dimensional arrays! Let's make a 5x5, two-dimensional array
Step6: Great, so now you have a handle on generating uniform distributions. Let's quickly look at one more type of distribution.
Step7: So these numbers probably don't mean that much to you. Don't worry; they don't mean much to me either!
Step8: Now, let's plot a uniform distribution and take a look.
Step9: Now, let's use plt.hist to see what X looks like. First, run the cell below. Then, vary bins -- doing so will either increase or decrease the apparent effect of noise in your distribution.
Step10: Nice job! Do you see why the "uniform distribution" is referred to as such?
Step11: Nice job! You just plotted a Gaussian distribution with mean of 0 and a standard deviation of 1.
Step12: Before moving onto the next section, vary the values of mu and sigma in the above code to see how your histogram changes. You should find that changing mu (the mean) affects the center of the distribution while changing sigma (the standard deviation) affects the width of the distribution.
Step13: For simplicity's sake, we've used plt.hist without generating any return variables. Remember that plt.hist takes in your data (X) and the number of bins, and it makes histograms from it. In the process, plt.hist generates variables that you can store; we just haven't thus far. Run the cell below to see -- it should replot the Gaussian from above while also generating the output variables.
Step14: Something that might be useful to you is that you can make use of variables outputted by plt.hist -- particularly bins and N.
Step15: The plot above (red stars) should look like it overlays the histogram plot above it. If that's what you see, nice job! If not, let your instructor and/or TAs know before moving onto the next section.
Step16: So you've learned how to generate distributions of numbers, plot them, and generate statistics on them. This is a great starting point, but let's try working with some real data!
Step17: Next, plot a histogram of this data set (play around with the number of bins, too).
Step18: Now, calculate and print the mean and standard deviation of this distribution.
Step19: Nice job! Now that you're used to working with real data, we're going to try to fit some more real data to known functions to gain a better understanding of that data.
Step20: We will show you an example, and then you get to try it out for yourself!
Step21: Next, we will define a function $f(x) = \frac 1 3x^2+3$ that will square the elements in x and add an offset. Call this function f_scalar, and implement it (for scalar values) below.
Step22: We will create a new variable, y, that will call the function f_scalar with x_vals as the input. Note that we are using two separate variable names, x and x_vals, so we don't confuse them! This is good programming practice; you should try not to use the same variable names unless you are intentionally overriding something.
Step23: Now we will add some noise to the array y using the np.random.rand() function and store it in a new variable called y_noisy.
Step24: Let's see what the y_noisy values look like now
Step25: It seems like there's still a rough parabolic shape, so let's see if we can recover the original y values without any noise.
Step26: Then, we want to find the optimal values of a, b, and c that will give a function that fits best to y_noisy.
Step27: Now that we have the fitted parameters, let's use quadratic to plot the fitted parabola alongside the noisy y values.
Step28: And we can also compare y_fitted to the original y values without any noise
Step29: Not a bad job for your first fit function!
Steps for using curve_fit
Step30: So you've imported your data and plotted it. It should look similar to the figure below. Run the next cell to see.
Step31: What type of function would you say this is? Think back to the distributions we've learned about today. Any ideas?
|
12,590
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def fun_log(z):
return 1/(1+np.exp(-z))
z = np.linspace(-5, 5)
plt.figure(figsize = (8,6))
plt.plot(z, fun_log(z), lw = 2)
plt.xlabel('$z$')
plt.ylabel('$\sigma(z)$')
plt.grid()
plt.show()
def reg_log(B,Xa):
return fun_log(Xa.dot(B))
data_file = 'ex2data1.txt'
data = pd.read_csv(data_file, header=None)
X = data.iloc[:,0:2].values
Y = data.iloc[:,2].values
X
Y
plt.figure(figsize = (8,6))
plt.scatter(X[:,0], X[:,1], c=Y)
plt.show()
import pyomo_utilities
B = pyomo_utilities.logreg_clas(X, Y)
B
x = np.arange(20, 110, 0.5)
y = np.arange(20, 110, 0.5)
Xm, Ym = np.meshgrid(x, y)
m,n = np.shape(Xm)
Xmr = np.reshape(Xm,(m*n,1))
Ymr = np.reshape(Ym,(m*n,1))
Xa = np.append(np.ones((len(Ymr),1)), Xmr, axis=1)
Xa = np.append(Xa,Ymr,axis=1)
Yg = reg_log(B,Xa)
Z = np.reshape(Yg, (m,n))
Z = np.round(Z)
plt.figure(figsize=(10,10))
plt.contour(Xm, Ym, Z)
plt.scatter(X[:, 0],X[:, 1], c=Y, edgecolors='w')
plt.show()
X = 10*np.random.random((100, 2))
Y = (X[:, 1] > X[:, 0]**2)*1
plt.figure(figsize = (8,6))
plt.scatter(X[:,0], X[:,1], c=Y)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We note that
Step2: 3. Cost functional (a sketch of this cost appears after this list)
Step3: Design a binary classifier using logistic regression.
Step4: The classifier parameters are then
Step5: Activity
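The internals of pyomo_utilities.logreg_clas are not shown in this notebook; as an illustration of the cost functional such a fit minimizes, the sketch below writes out the binary cross-entropy and hands it to scipy.optimize.minimize as a stand-in solver (an assumption, not the notebook's actual pyomo formulation):
import numpy as np
from scipy.optimize import minimize
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
def cost(B, Xa, Y):
    # binary cross-entropy (negative log-likelihood) of the logistic model
    p = sigmoid(Xa.dot(B))
    eps = 1e-12  # numerical guard against log(0)
    return -np.mean(Y * np.log(p + eps) + (1 - Y) * np.log(1 - p + eps))
# usage sketch on toy data; Xa gets a leading column of ones for the intercept
rng = np.random.default_rng(0)
X = rng.random((100, 2))
Y = (X[:, 1] > X[:, 0]).astype(float)
Xa = np.hstack([np.ones((len(X), 1)), X])
B = minimize(cost, x0=np.zeros(3), args=(Xa, Y)).x
print(B)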
|
12,591
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pymc3 as pm
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import theano.tensor as T
x = np.random.randn(100)
with pm.Model() as model:
mu = pm.Normal('mu', mu=0, sd=1)
sd = pm.Normal('sd', mu=0, sd=1)
obs = pm.Normal('obs', mu=mu, sd=sd, observed=x)
step = pm.Metropolis()
trace = pm.sample(5000, step)
pm.traceplot(trace);
with pm.Model() as model:
mu = pm.Normal('mu', mu=0, sd=1)
sd = pm.Normal('sd', mu=0, sd=1)
mu_print = T.printing.Print('mu')(mu)
sd_print = T.printing.Print('sd')(sd)
obs = pm.Normal('obs', mu=mu_print, sd=sd_print, observed=x)
step = pm.Metropolis()
trace = pm.sample(3, step) # Make sure not to draw too many samples
from io import StringIO
import sys
x = np.random.randn(100)
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
with pm.Model() as model:
mu = pm.Normal('mu', mu=0, sd=1)
sd = pm.Normal('sd', mu=0, sd=1)
mu_print = T.printing.Print('mu')(mu)
sd_print = T.printing.Print('sd')(sd)
obs = pm.Normal('obs', mu=mu_print, sd=sd_print, observed=x)
step = pm.Metropolis()
trace = pm.sample(3, step) # Make sure not to draw too many samples
sys.stdout = old_stdout
output = mystdout.getvalue().split('\n')
mulines = [s for s in output if 'mu' in s]
muvals = [line.split()[-1] for line in mulines]
plt.plot(np.arange(0,len(muvals)), muvals);
plt.xlabel('proposal iteration')
plt.ylabel('mu value')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hm, looks like something has gone wrong, but what? Let's look at the values getting proposed using the Print operator
Step2: Looks like sd is always 0 which will cause the logp to go to -inf. Of course, we should not have used a prior that has negative mass for sd but instead something like a HalfNormal.
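A minimal sketch of the corrected model, swapping the Normal prior on sd for a HalfNormal so every proposed scale is positive (assuming the same PyMC3 API used above):
x = np.random.randn(100)
with pm.Model() as fixed_model:
    mu = pm.Normal('mu', mu=0, sd=1)
    sd = pm.HalfNormal('sd', sd=1)   # strictly positive prior for the scale
    obs = pm.Normal('obs', mu=mu, sd=sd, observed=x)
    trace = pm.sample(1000)          # sampling now works since logp stays finite
pm.traceplot(trace);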
|
12,592
|
<ASSISTANT_TASK:>
Python Code:
import re
three_repeating_characters = re.compile(r'(.)\1{2}')
with open('../inputs/day14.txt', 'r') as f:
salt = f.readline().strip()
# TEST DATA
# salt = 'abc'
print(salt)
import hashlib
hash_index= {}
def get_hash_string(key):
if key in hash_index:
return hash_index[key]
string = '{salt}{key}'.format(salt=salt, key=key)
md5 = hashlib.md5()
md5.update(string.encode('ascii'))
hashstring = md5.hexdigest()
hash_index[key] = hashstring
return hashstring
def run():
keys = []
current_key = 0
while(len(keys) < 64):
for i in range(0, current_key):
hash_index.pop(i, None)
hashstring = get_hash_string(current_key)
repeating_character = three_repeating_characters.findall(hashstring)
if not repeating_character:
current_key += 1
continue
repeating_character = repeating_character[0]
repeating_character_five = repeating_character * 5
for qualifying_index in range(current_key + 1, current_key + 1001):
hashstring = get_hash_string(qualifying_index)
if repeating_character_five in hashstring:
break
else:
current_key += 1
continue
keys.append(current_key)
print(len(keys), current_key)
current_key += 1
return keys
print('answer', run()[63])
hash_index = {}
def get_hash_string(key):
if key in hash_index:
return hash_index[key]
string = '{salt}{key}'.format(salt=salt, key=key)
md5 = hashlib.md5()
md5.update(string.encode('ascii'))
hashstring = md5.hexdigest()
# PART TWO
for i in range(0, 2016):
md5 = hashlib.md5()
md5.update(hashstring.encode('ascii'))
hashstring = md5.hexdigest()
hash_index[key] = hashstring
return hashstring
print('answer', run()[63])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hash index
Step2: Part Two
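The hash_index dict above is a hand-rolled memo; as an illustrative alternative (not the notebook's approach), the part-two key stretching can be wrapped with functools.lru_cache:
import hashlib
from functools import lru_cache
@lru_cache(maxsize=None)
def stretched_hash(salt, key, rounds=2016):
    # one MD5 pass over salt+key, then `rounds` extra passes over the hex digest
    digest = hashlib.md5('{}{}'.format(salt, key).encode('ascii')).hexdigest()
    for _ in range(rounds):
        digest = hashlib.md5(digest.encode('ascii')).hexdigest()
    return digest
print(stretched_hash('abc', 0))  # deterministic, and cached on repeated calls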
|
12,593
|
<ASSISTANT_TASK:>
Python Code:
spam = ["eggs", 7.12345] # This is a list, a comma-separated sequence of values between square brackets
print spam
print type(spam)
eggs = [spam,
1.2345,
"fooo"] # No problem with multi-line declaration
print eggs
spam = [] # And this is an empty list
print spam
spam = ("eggs", 7.12345) # This is a tuple, a comma-separated sequence of values between parentheses
print spam
print type(spam)
eggs = (spam,
1.2345,
"fooo") # Again, no problem with multiline declaration
print eggs
spam = ("eggs", ) # Single item tuple requires the comma
print spam
# What will be the output of this? (parentheses alone do not make a tuple, so spamOne is a str)
spamOne = ("eggs")
print spamOne
spam = "eggs", # Actually, it is enough with the comma
print spam
spam = "eggs", 7.12345 # This is called tuple packing
print spam
val1, val2 = spam # And this is the opposite, tuple unpacking
print val1
print val2
spam = "spam"
eggs = "eggs"
eggs, spam = spam, eggs
print spam
print eggs
spam = ["eggs", 7.12345]
val1, val2 = spam # Unpacking also works with lists (but packing always generates tuples)
print val1
print val2
# And what about strings? Remember they are sequences too...
spam = "spam"
s, p, a, m = spam # Unpacking even works with strings
print s
print p
print a
print m
spam = ["1st", "2nd", "3rd", "4th", "5th"]
eggs = (spam, 1.2345, "fooo")
print "eggs" in spam
print "fooo" not in eggs
print "am" in "spam" # Check items membership
print "spam".find("am") # NOT recommended for membership
print spam.count("1st") # Count repetitions (slow)
print spam + spam
print eggs + eggs
print "spam" + "eggs" # Concatenation (shallow copy), must be of the same type
print spam * 5
print eggs * 3
print "spam" * 3 # Also "multiply" creating shallow copies concatenated
print len(spam)
print len(eggs)
print len("spam") # Obtain its length
# Let's obtain min and max values (slow)
print min([5, 6, 2])
print max("xyzw abcd XYZW ABCD")
# Let's see how indexing works
spam = ["1st", "2nd", "3rd", "4th", "5th"]
eggs = (spam, 1.2345, "fooo")
print spam[0]
print eggs[1]
print "spam"[2] # Access by index, starting from 0 to length - 1, may raise an exception
print spam[-1]
print eggs[-2]
print "spam"[-3] # Access by index, even negative
print eggs[0]
print eggs[0][0]
print eggs[0][0][-1] # Concatenate index accesses
# Let's see how slicing works
spam = ("1st", "2nd", "3rd", "4th", "5th")
print spam[1:3] # Use colon and a second index for slicing
print type(spam[1:4]) # It generates a brand new object (shallow copy)
spam = ["1st", "2nd", "3rd", "4th", "5th"]
print spam[:3]
print spam[1:7]
print spam[-2:7] # Negative indexes are also valid
print spam[3:-2]
print spam[:] # Without indexes it performs a shallow copy
print spam[1:7:2] # Use another colon and a third int to specify the step
print spam[::2]
print spam[::-2] # A negative step traverse the sequence in the other way
print spam[::-1] # Useful to reverse a sequence
# Let's try something different
spam = ["1st", "2nd", "3rd", "4th", "5th"]
spam[3] = 1
print spam # Index direct modification, may raise an exception
spam = [1, 2, 3, 4, 5]
eggs = ['a', 'b', 'c']
spam[1:3] = eggs
print spam # We can use slicing here too!
spam = [1, 2, 3, 4, 5, 6, 7, 8]
eggs = ['a', 'b', 'c']
spam[1:7:2] = eggs
print spam # We can use even slicing with step!!
spam = [1, 2, 3, 4, 5]
spam.append("a")
print spam # We can append an element at the end (amortized O(1))
spam = [1, 2, 3, 4, 5]
eggs = ['a', 'b', 'c']
spam.extend(eggs)
print spam # We can append another sequence elements at the end (amortized O(1))
spam = [1, 2, 3, 4, 5]
eggs = ['a', 'b', 'c']
spam.append(eggs)
print spam # Take care to not mix both commands!!
spam = [1, 2, 3, 4, 5]
spam.insert(3, "a")
print spam # The same like spam[3:3] = ["a"]
spam = [1, 2, 3, 4, 5]
print spam.pop()
print spam # Pop (remove and return) last item
print spam.pop(2)
print spam # Pop (remove and return) given item
spam = [1, 2, 3, 4, 5]
del spam[3]
print spam # Delete an item
spam = tuple([1, 2, 3, 4, 5, 6, 7, 8])
eggs = list(('a', 'b', 'c')) # Shallow copy constructors
print spam
print eggs
spam = {"one": 1, "two": 2, "three": 3} # This is a dictionary
print spam
print type(spam)
eggs = {1: "one",
2: "two",
3: "three"} # Again, no problem with multiline declaration
print eggs
spam = dict(one=1, two=2, three=3) # Use keyword arguments (we will talk about them in short)
print spam
eggs = dict([(1, "one"), (2, "two"), (3, "three")]) # Sequence of two elements sequences (key and object)
print eggs # Note that these tuples require the parentheses just to group
spam = dict(eggs) # Shallow copy constructor
print spam
spam = {"one": 1, "two": 2, "three": 3}
print spam["two"] # Access by key, may raise an exception
spam = {"one": 1, "two": 2, "three": 3}
print "two" in spam # Check keys membership
print 2 not in spam # Check keys membership
spam = {"one": 1, "two": 2, "three": 3}
print spam.get("two")
print spam.get("four")
print spam.get("four", 4) # Safer access by key, never raises an exception, optional default value
spam = {"one": 1, "two": 2, "three": 3}
print spam.keys() # Retrieve keys list (copy) in arbitrary order
print spam.values() # Retrieve values list (copy) in arbitrary order
print spam.items() # Retrieve key, values pairs list (copy) in arbitrary order
spam = {"one": 1, "two": 2, "three": 3}
spam["two"] = 22 # Set or replace a key value
spam["four"] = 44 # Set or replace a key value
print spam
spam = {"one": 1, "two": 2, "three": 3}
print spam.popitem()
print spam
spam = {"one": 1, "two": 2, "three": 3}
print spam.pop("two") # Pop (remove and return) given item, may raise an exception
print spam.pop("four", 4) # Pop (remove and return) given item with optional default value
print spam
spam = {"one": 1, "two": 2, "three": 3}
eggs = {"three": 33, "four": 44}
spam.update(eggs) # Update dictionary with other dict content
print spam
spam = {"one": 1, "two": 2, "three": 3}
eggs = {1: "one", 2: "two", 3: "three"}
spam.update(two=22, four=44) # Like dict constructor, it accepts keyword arguments
eggs.update([(0, "ZERO"), (1, "ONE")]) # Like dict constructor, it accepts a sequence of pairs
print spam
print eggs
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can mix all kind of types inside a list
Step2: What about tuples?
Step3: What about both together?
Step4: Let's go back to lists
Step5: str and unicode are also sequences
Step6: When slicing, Python is able to cleverly clamp out-of-range indexes
Step is 1 by default and does not need to be a multiple of the sequence length
Step7: Let's see some slice modifications
Step8: SOURCES
Step9: Still more ways to declare dictionaries
Step10: Python mappings
Step11: Let's play a bit with inplace modifications of dicts content
|
12,594
|
<ASSISTANT_TASK:>
Python Code:
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
import os, json, math, shutil
import datetime
import numpy as np
import logging
# SET TF ERROR LOG VERBOSITY
logging.getLogger("tensorflow").setLevel(logging.ERROR)
import tensorflow as tf
print(tf.version.VERSION)
PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION
os.environ["BUCKET"] = PROJECT # DEFAULT BUCKET WILL BE PROJECT ID
if PROJECT == "your-gcp-project-here":
print("Don't forget to update your PROJECT name! Currently:", PROJECT)
%%bash
## Create new ML GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${PROJECT}-ml/)
if [ -n "$exists" ]; then
echo -e "Bucket exists, let's not recreate it."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${PROJECT}-ml
echo -e "\nHere are your current buckets:"
gsutil ls
fi
# Note that this cell is special. It's got a tag (you can view tags by clicking on the wrench icon on the left menu in Jupyter)
# These are parameters that we will configure so that we can schedule this notebook
DATADIR = '../../data'
OUTDIR = './trained_model'
NBUCKETS = 10 # for feature crossing
TRAIN_BATCH_SIZE = 32
NUM_TRAIN_EXAMPLES = 10000 * 5 # remember the training dataset repeats, so this will wrap around
NUM_EVALS = 5 # evaluate this many times
NUM_EVAL_EXAMPLES = 10000 # enough to get a reasonable sample, but no so much that it slows down
if DATADIR[:5] == 'gs://':
!gsutil ls $DATADIR/*.csv
else:
!ls -l $DATADIR/*.csv
CSV_COLUMNS = ['fare_amount', 'pickup_datetime',
'pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude',
'passenger_count', 'key']
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0],['na'],[0.0],[0.0],[0.0],[0.0],[0.0],['na']]
def features_and_labels(row_data):
for unwanted_col in ['key']: # keep the pickup_datetime!
row_data.pop(unwanted_col)
label = row_data.pop(LABEL_COLUMN)
return row_data, label # features, label
# load the training data
def load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
pattern = '{}/{}'.format(DATADIR, pattern)
dataset = (
# TODO 1: Recall from earlier how you used tf.data to read the CSV files (no changes needed):
tf.data.experimental.make_csv_dataset(pattern, batch_size, CSV_COLUMNS, DEFAULTS)
.map(features_and_labels) # features, label
)
if mode == tf.estimator.ModeKeys.TRAIN:
print("Repeating training dataset indefinitely")
dataset = dataset.shuffle(1000).repeat()
dataset = dataset.prefetch(1) # take advantage of multi-threading; 1=AUTOTUNE
return dataset
def parse_datetime(s):
if type(s) is not str:
s = s.numpy().decode('utf-8') # if it is a Tensor
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S %Z")
for s in ['2012-07-05 14:18:00 UTC']:
print(s)
for ts in [parse_datetime(s), parse_datetime(tf.constant(s))]: # as string, as tensor
print(ts.weekday())
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
print(DAYS[ts.weekday()])
## Add transformations
def euclidean(params):
lon1, lat1, lon2, lat2 = params
# TODO 2 (solution): create two new features called londiff and latdiff,
# the differences between the longitudes and between the latitudes
londiff = lon2 - lon1
latdiff = lat2 - lat1
return tf.sqrt(londiff*londiff + latdiff*latdiff)
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
def get_dayofweek(s):
ts = parse_datetime(s)
return DAYS[ts.weekday()]
@tf.function
def dayofweek(ts_in):
return tf.map_fn(
lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
ts_in
)
@tf.function
def fare_thresh(x):
return 60 * tf.keras.activations.relu(x)
def transform(inputs, NUMERIC_COLS, STRING_COLS):
print("BEFORE TRANSFORMATION")
print("INPUTS:", inputs.keys())
# Pass-through columns
transformed = inputs.copy()
del transformed['pickup_datetime']
feature_columns = {
colname: tf.feature_column.numeric_column(colname)
for colname in NUMERIC_COLS
}
# scale the lat, lon values to be in 0, 1
if True:
for lon_col in ['pickup_longitude', 'dropoff_longitude']: # in range -70 to -78
transformed[lon_col] = tf.keras.layers.Lambda(
lambda x: (x+78)/8.0,
name='scale_{}'.format(lon_col)
)(inputs[lon_col])
for lat_col in ['pickup_latitude', 'dropoff_latitude']: # in range 37 to 45
transformed[lat_col] = tf.keras.layers.Lambda(
lambda x: (x-37)/8.0,
name='scale_{}'.format(lat_col)
)(inputs[lat_col])
# add Euclidean distance. Doesn't have to be accurate calculation because NN will calibrate it
if True:
transformed['euclidean'] = tf.keras.layers.Lambda(euclidean, name='euclidean')([
inputs['pickup_longitude'],
inputs['pickup_latitude'],
inputs['dropoff_longitude'],
inputs['dropoff_latitude']
])
feature_columns['euclidean'] = tf.feature_column.numeric_column('euclidean')
# hour of day from timestamp of form '2010-02-08 09:17:00+00:00'
if True:
transformed['hourofday'] = tf.keras.layers.Lambda(
lambda x: tf.strings.to_number(tf.strings.substr(x, 11, 2), out_type=tf.dtypes.int32),
name='hourofday'
)(inputs['pickup_datetime'])
feature_columns['hourofday'] = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_identity('hourofday', num_buckets=24))
if False:
# day of week is hard because there is no TensorFlow function for date handling
transformed['dayofweek'] = tf.keras.layers.Lambda(
lambda x: dayofweek(x),
name='dayofweek_pyfun'
)(inputs['pickup_datetime'])
transformed['dayofweek'] = tf.keras.layers.Reshape((), name='dayofweek')(transformed['dayofweek'])
feature_columns['dayofweek'] = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'dayofweek', vocabulary_list = DAYS))
if True:
# featurecross lat, lon into nxn buckets, then embed
nbuckets = NBUCKETS
latbuckets = np.linspace(0, 1, nbuckets).tolist()
lonbuckets = np.linspace(0, 1, nbuckets).tolist()
b_plat = tf.feature_column.bucketized_column(feature_columns['pickup_latitude'], latbuckets)
b_dlat = tf.feature_column.bucketized_column(feature_columns['dropoff_latitude'], latbuckets)
b_plon = tf.feature_column.bucketized_column(feature_columns['pickup_longitude'], lonbuckets)
b_dlon = tf.feature_column.bucketized_column(feature_columns['dropoff_longitude'], lonbuckets)
ploc = tf.feature_column.crossed_column([b_plat, b_plon], nbuckets * nbuckets)
dloc = tf.feature_column.crossed_column([b_dlat, b_dlon], nbuckets * nbuckets)
pd_pair = tf.feature_column.crossed_column([ploc, dloc], nbuckets ** 4 )
feature_columns['pickup_and_dropoff'] = tf.feature_column.embedding_column(pd_pair, 100)
print("AFTER TRANSFORMATION")
print("TRANSFORMED:", transformed.keys())
print("FEATURES", feature_columns.keys())
return transformed, feature_columns
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model():
# input layer is all float except for pickup_datetime which is a string
STRING_COLS = ['pickup_datetime']
NUMERIC_COLS = set(CSV_COLUMNS) - set([LABEL_COLUMN, 'key']) - set(STRING_COLS)
print(STRING_COLS)
print(NUMERIC_COLS)
inputs = {
colname : tf.keras.layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUMERIC_COLS
}
inputs.update({
colname : tf.keras.layers.Input(name=colname, shape=(), dtype='string')
for colname in STRING_COLS
})
# transforms
transformed, feature_columns = transform(inputs, NUMERIC_COLS, STRING_COLS)
# TODO 3 (solution): a DenseFeatures layer turns the feature columns into the dense input tensor
# Tip: Refer to https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/DenseFeatures
dnn_inputs = tf.keras.layers.DenseFeatures(feature_columns.values())(transformed)
# two hidden layers of [32, 8] just in like the BQML DNN
h1 = tf.keras.layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
h2 = tf.keras.layers.Dense(8, activation='relu', name='h2')(h1)
if False:
# final output would normally have a linear activation because this is regression
# However, we know something about the taxi problem (fares are +ve and tend to be below $60).
# Use that here. (You can verify by running this query):
output = tf.keras.layers.Dense(1, activation=fare_thresh, name='fare')(h2)
else:
output = tf.keras.layers.Dense(1, name='fare')(h2)
model = tf.keras.models.Model(inputs, output)
model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse'])
return model
model = build_dnn_model()
print(model.summary())
tf.keras.utils.plot_model(model, 'dnn_model.png', show_shapes=False, rankdir='LR')
trainds = load_dataset('taxi-train*', TRAIN_BATCH_SIZE, tf.estimator.ModeKeys.TRAIN)
evalds = load_dataset('taxi-valid*', 1000, tf.estimator.ModeKeys.EVAL).take(NUM_EVAL_EXAMPLES//10000) # evaluate on 1/10 final evaluation set
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
shutil.rmtree('{}/checkpoints/'.format(OUTDIR), ignore_errors=True)
checkpoint_path = '{}/checkpoints/taxi'.format(OUTDIR)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
history = model.fit(trainds,
validation_data=evalds,
epochs=NUM_EVALS,
steps_per_epoch=steps_per_epoch,
verbose=2, # 0=silent, 1=progress bar, 2=one line per epoch
callbacks=[cp_callback])
# plot
import matplotlib.pyplot as plt
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(['loss', 'rmse']):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
evalds = load_dataset('taxi-valid*', 1000, tf.estimator.ModeKeys.EVAL).take(NUM_EVAL_EXAMPLES//1000)
model.evaluate(evalds)
# TODO 4: Make example predictions. Experiment with different passenger_counts and pickup times and re-run.
model.predict({
'pickup_longitude': tf.convert_to_tensor([-73.982683]),
'pickup_latitude': tf.convert_to_tensor([40.742104]),
'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
'dropoff_latitude': tf.convert_to_tensor([40.755174]),
'passenger_count': tf.convert_to_tensor([3.0]),
'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00 UTC'], dtype=tf.string),
}, steps=1)
import shutil, os, datetime
OUTPUT_DIR = os.path.join(OUTDIR, 'export/savedmodel')
if OUTPUT_DIR[:5] != 'gs://':
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
# TODO 5 (solution): export the model in preparation for serving later
# Tip: Refer to: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/saved_model/save
tf.saved_model.save(model, EXPORT_PATH)
!saved_model_cli show --tag_set serve --signature_def serving_default --dir {EXPORT_PATH}
!find {EXPORT_PATH}
os.environ['EXPORT_PATH'] = EXPORT_PATH
%%bash
PROJECT=${PROJECT}
BUCKET=${PROJECT}-ml
REGION=us-east1
MODEL_NAME=taxifare
VERSION_NAME=v2
if [[ $(gcloud ai-platform models list --format='value(name)' | grep $MODEL_NAME) ]]; then
echo "$MODEL_NAME already exists"
else
# create model
echo "Creating $MODEL_NAME"
gcloud ai-platform models create --regions=$REGION $MODEL_NAME
fi
if [[ $(gcloud ai-platform versions list --model $MODEL_NAME --format='value(name)' | grep $VERSION_NAME) ]]; then
echo "Deleting already existing $MODEL_NAME:$VERSION_NAME ... "
gcloud ai-platform versions delete --model=$MODEL_NAME $VERSION_NAME
echo "Please run this cell again if you don't see a Creating message ... "
sleep 10
fi
# create model
echo "Creating $MODEL_NAME:$VERSION_NAME"
gcloud ai-platform versions create --model=$MODEL_NAME $VERSION_NAME --async \
--framework=tensorflow --python-version=3.7 --runtime-version=1.15 \
--origin=$EXPORT_PATH --staging-bucket=gs://$BUCKET
%%writefile repro.json
{"pickup_longitude": -73.982683, "pickup_latitude": 40.742104, "dropoff_longitude": -73.983766, "dropoff_latitude": 40.755174, "passenger_count": 3.0, "pickup_datetime": "2010-02-08 09:17:00 UTC"}
!gcloud ai-platform predict --model taxifare --json-instances repro.json --version v2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a new Google Cloud Storage Bucket for ML model exports
Step2: Setup parameters for notebook scheduling
Step3: Locating the CSV files
Step4: Use tf.data to read the CSV files
Step5: Add data transformations and build the new DNN
Step6: Visualize the DNN model layers
Step7: Train the model
Step8: Visualize the Model Loss Curves
Step9: Evaluate over the full validation dataset
Step10: Although we get RMSE of around 10 (your answer will be different due to random seeds), remember that we trained on a really small subset of the data. We need a larger training dataset before making decisions about this model.
Step11: However, this is not realistic, because we can't expect client code to have a model object in memory. We'll have to export our model to a file, and expect client code to instantiate the model from that exported file.
Step12: Serve the model on Cloud AI Platform (a local reload sketch follows this list)
Step13: In this notebook, we have looked at how to implement a custom Keras model using feature columns.
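Before deploying, the export can be double-checked by reloading the SavedModel locally. A sketch assuming TensorFlow's default 'serving_default' signature, with input names matching the Keras Input layers defined above:
import tensorflow as tf
loaded = tf.saved_model.load(EXPORT_PATH)
infer = loaded.signatures['serving_default']
print(infer(
    pickup_longitude=tf.constant([-73.982683]),
    pickup_latitude=tf.constant([40.742104]),
    dropoff_longitude=tf.constant([-73.983766]),
    dropoff_latitude=tf.constant([40.755174]),
    passenger_count=tf.constant([3.0]),
    pickup_datetime=tf.constant(['2010-02-08 09:17:00 UTC']),
))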
|
12,595
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
capitais = pd.read_csv('capitais.csv', sep=',')
capitais.head() # Print the first 5 rows of the dataframe
capitais = pd.read_csv('capitais.csv', sep=',', index_col='municipio')
capitais.head()
len(capitais.index)
capitais.index
capitais.columns
capitais["populacao 2015"].head()
capitais.get('sao Paulo', 'Não encontrou')
capitais.area_km2.head()
capitais.populacao 2015  # SyntaxError: attribute names cannot contain spaces
capitais = capitais.rename(columns={'populacao 2015' : 'populacao_2015'})
capitais.head()
capitais.populacao_2015.head()
def capitais_com_letra(todas_capitais, letra):
    pass  # your code here (one possible solution is sketched after the step list)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note that the generated index is the default numeric one. To set the municipio column as the index, we need to pass the parameter
Step2: There we go! We can check the row and column indexes of the dataframe using the following commands.
Step3: To access a column, we can do it the same way as with dictionaries, by passing the label.
Step4: If the key does not exist
Step5: Another way to access the attributes of a DataFrame is to use the dot operator.
Step6: A valid identifier must basically start with a letter and contain no whitespace or certain special characters. See the reference
Step7: Exercises (one possible solution sketch follows this list)
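One possible solution sketch for the exercise, assuming the intent is to return the capital names (the dataframe index) that start with a given letter; the implementation is illustrative, not an official answer:
def capitais_com_letra(todas_capitais, letra):
    # keep the index entries (capital names) whose first letter matches
    letra = letra.lower()
    return [nome for nome in todas_capitais.index if nome.lower().startswith(letra)]
print(capitais_com_letra(capitais, 's'))  # e.g. ['sao Paulo', ...] depending on the CSV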
|
12,596
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'gfdl-esm2m', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
DOC.set_value("Other: ice")
DOC.set_value("bare soil")
DOC.set_value("lake")
DOC.set_value("vegetated")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Soil type prescibed at each grid point")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Soil type nprescibed at each grid point")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Bare soil albedo prescibed at each grid point")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Dynamic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
DOC.set_value("distinction between direct and diffuse albedo")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(2)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(20)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
DOC.set_value("Other: generalized richards equation")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(20)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Basic thermodynamics")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
DOC.set_value("Explicit diffusion")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
DOC.set_value("Other: plant uptake and ground divergence")
DOC.set_value("soil moisture freeze-thaw")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(5)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
DOC.set_value("constant")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("prognostic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("prognostic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("prognostic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
DOC.set_value("ground snow fraction")
DOC.set_value("vegetation snow fraction")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
DOC.set_value("Other: snow refreezing")
DOC.set_value("snow interception")
DOC.set_value("snow melting")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
DOC.set_value("prognostic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
DOC.set_value("vegetation type")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
DOC.set_value("biome types")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
DOC.set_value("deciduous broadleaf forest")
DOC.set_value("evergreen broadleaf forest")
DOC.set_value("evergreen needleleaf forest")
DOC.set_value("grassland")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
DOC.set_value("dynamical (varying from simulation)")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
DOC.set_value("prognostic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("diagnostic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("prognostic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
DOC.set_value("CO2")
DOC.set_value("light")
DOC.set_value("temperature")
DOC.set_value("water availability")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(1)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
DOC.set_value("Other: linked to photosynthesis")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
DOC.set_value("transpiration")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(5)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Leaves, virtual leaves, roots, sapwood, heartwood")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
DOC.set_value("leaves + fine roots + coarse roots + stems")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
DOC.set_value("function of plant allometry")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Fast and slow soil carbon")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(1)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
DOC.set_value("present day")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
DOC.set_value("direct (large rivers)")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
DOC.set_value("heat")
DOC.set_value("water")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
DOC.set_value("heat")
DOC.set_value("water")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("prognostic")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
DOC.set_value("vertical")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil Layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
12,597
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inm', 'inm-cm5-h', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
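# Illustrative usage of the setters above (hypothetical name/email; kept
# commented out so no placeholder values are actually recorded):
# DOC.set_author("Jane Doe", "jane.doe@example.org")
# DOC.set_contributor("John Smith", "john.smith@example.org")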
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
12,598
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
data_dict = pickle.load(open("../final_project/final_project_dataset.pkl", "r") )
### first element is our labels, any added elements are predictor
### features. Keep this the same for the mini-project, but you'll
### have a different feature list when you do the final project.
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
print len(labels), len(features)
from sklearn import tree
from time import time
def submitAcc(features, labels):
    # Accuracy of the (already fitted) module-level classifier on the given data
    return clf.score(features, labels)
clf = tree.DecisionTreeClassifier()
t0 = time()
clf.fit(features, labels)
print("done in %0.3fs" % (time() - t0))
pred = clf.predict(features)
# Scoring on the training data itself overestimates performance
print "Classifier with accuracy %.2f%%" % (submitAcc(features, labels) * 100)
from sklearn import cross_validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(features, labels, test_size=0.30, random_state=42)
print len(X_train), len(y_train)
print len(X_test), len(y_test)
clf = tree.DecisionTreeClassifier()
t0 = time()
clf.fit(X_train, y_train)
print("done in %0.3fs" % (time() - t0))
pred = clf.predict(X_test)
print "Classifier with accurancy %.2f%%" % (submitAcc(X_test, y_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a decision tree classifier (just use the default parameters), train it on all the data. Print out the accuracy.
Step2: Now you’ll add in training and testing, so that you get a trustworthy accuracy number. Use the train_test_split validation available in sklearn.cross_validation; hold out 30% of the data for testing and set the random_state parameter to 42 (random_state controls which points go into the training set and which are used for testing; setting it to 42 means we know exactly which events are in which set, and can check the results you get).
|
12,599
|
<ASSISTANT_TASK:>
Python Code:
# -*- coding: UTF-8 -*-
import pandas as pd
import numpy as np
import re
from bs4 import BeautifulSoup
def review_to_wordlist(review):
    '''
    Convert an IMDB review into a list of words.
    Reference: http://blog.csdn.net/longxinchen_ml/article/details/50629613
    '''
    # Strip the HTML tags and keep the text content
    review_text = BeautifulSoup(review, "html.parser").get_text()
    # Keep letters only
    review_text = re.sub("[^a-zA-Z]", " ", review_text)
    # Lowercase everything and split into a list of words
    words = review_text.lower().split()
    # Return the word list
    return words
# Load the datasets
train = pd.read_csv('/Users/frank/Documents/workspace/kaggle/dataset/Bag_of_Words_Meets_Bags_of_Popcorn/labeledTrainData.tsv', header=0, delimiter="\t", quoting=3)
test = pd.read_csv('/Users/frank/Documents/workspace/kaggle/dataset/Bag_of_Words_Meets_Bags_of_Popcorn/testData.tsv', header=0, delimiter="\t", quoting=3)
print train.head()
print test.head()
# Preprocess the data
label = train['sentiment']
train_data = []
for i in range(len(train['review'])):
    train_data.append(' '.join(review_to_wordlist(train['review'][i])))
test_data = []
for i in range(len(test['review'])):
    test_data.append(' '.join(review_to_wordlist(test['review'][i])))
# Preview the data
print train_data[0], '\n'
print test_data[0]
from sklearn.feature_extraction.text import TfidfVectorizer as TFIDF
# Reference: http://blog.csdn.net/longxinchen_ml/article/details/50629613
tfidf = TFIDF(min_df=2, # minimum document frequency of 2
    max_features=None,
    strip_accents='unicode',
    analyzer='word',
    token_pattern=r'\w{1,}',
    ngram_range=(1, 3), # unigrams, bigrams and trigrams
    use_idf=1,
    smooth_idf=1,
    sublinear_tf=1,
    stop_words = 'english') # drop English stop words
# Merge the train and test sets so the TF-IDF vectorizer sees the full vocabulary
data_all = train_data + test_data
len_train = len(train_data)
tfidf.fit(data_all)
data_all = tfidf.transform(data_all)
# Split back into the train and test parts
train_x = data_all[:len_train]
test_x = data_all[len_train:]
print 'TF-IDF processing done.'
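# Optional sanity check of the vectorized output (illustrative only; assumes
# the scikit-learn of this era, where the vectorizer exposes get_feature_names()):
print 'train_x shape:', train_x.shape
print 'test_x shape:', test_x.shape
print 'sample features:', tfidf.get_feature_names()[:10]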
from sklearn.naive_bayes import MultinomialNB as MNB
model_NB = MNB()
model_NB.fit(train_x, label)
# Estimator repr echoed by the notebook:
# MNB(alpha=1.0, class_prior=None, fit_prior=True)
from sklearn.cross_validation import cross_val_score
import numpy as np
print "10-fold CV score of the multinomial naive Bayes classifier: ", np.mean(cross_val_score(model_NB, train_x, label, cv=10, scoring='roc_auc'))
test_predicted = np.array(model_NB.predict(test_x))
print 'Saving results...'
nb_output = pd.DataFrame(data=test_predicted, columns=['sentiment'])
nb_output['id'] = test['id']
nb_output = nb_output[['id', 'sentiment']]
nb_output.to_csv('nb_output.csv', index=False)
print 'Done.'
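# Optional variant (hypothetical output filename): the competition metric is
# AUC, so submitting the positive-class probability usually scores higher than
# hard 0/1 labels; predict_proba is standard on MultinomialNB.
nb_proba = model_NB.predict_proba(test_x)[:, 1]
nb_proba_output = pd.DataFrame(data={'id': test['id'], 'sentiment': nb_proba})
nb_proba_output.to_csv('nb_proba_output.csv', index=False)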
from sklearn.linear_model import LogisticRegression as LR
from sklearn.grid_search import GridSearchCV
# Set the grid search parameters
grid_values = {'C': [30]}
# Score with roc_auc
model_LR = GridSearchCV(LR(penalty='l2', dual=True, random_state=0), grid_values, scoring='roc_auc', cv=20)
# 20-fold cross validation; the fitted searcher's repr echoed by the
# notebook is kept below as a comment:
model_LR.fit(train_x, label)
# GridSearchCV(cv=20, estimator=LR(C=1.0, class_weight=None, dual=True,
#     fit_intercept=True, intercept_scaling=1, penalty='l2', random_state=0, tol=0.0001),
#     fit_params={}, iid=True, n_jobs=1,
#     param_grid={'C': [30]}, pre_dispatch='2*n_jobs', refit=True,
#     scoring='roc_auc', verbose=0)
# Print the grid search results
print model_LR.grid_scores_
test_predicted = np.array(model_LR.predict(test_x))
print 'Saving results...'
lr_output = pd.DataFrame(data=test_predicted, columns=['sentiment'])
lr_output['id'] = test['id']
lr_output = lr_output[['id', 'sentiment']]
lr_output.to_csv('lr_output.csv', index=False)
print 'Done.'
import gensim
import nltk
from nltk.corpus import stopwords
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
def review_to_wordlist(review, remove_stopwords=False):
    review_text = BeautifulSoup(review, "html.parser").get_text()
    review_text = re.sub("[^a-zA-Z]", " ", review_text)
    words = review_text.lower().split()
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    return(words)
def review_to_sentences(review, tokenizer, remove_stopwords=False):
    '''
    Split a review paragraph into sentences; returns a list of sentences,
    each of which is a list of words.
    '''
    raw_sentences = tokenizer.tokenize(review.strip().decode('utf8'))
    sentences = []
    for raw_sentence in raw_sentences:
        if len(raw_sentence) > 0:
            # Get the list of words in this sentence
            sentences.append(review_to_wordlist(raw_sentence, remove_stopwords))
    return sentences
sentences = []
for i, review in enumerate(train["review"]):
    sentences += review_to_sentences(review, tokenizer)
unlabeled_train = pd.read_csv("/Users/frank/Documents/workspace/kaggle/dataset/Bag_of_Words_Meets_Bags_of_Popcorn/unlabeledTrainData.tsv", header=0, delimiter="\t", quoting=3 )
for review in unlabeled_train["review"]:
    sentences += review_to_sentences(review, tokenizer)
print 'Finished preprocessing the unlabeled_train data.'
print len(train_data)
print len(sentences)
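# Peek at the first parsed sentence as a quick sanity check (illustrative)
print sentences[0]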
import time
from gensim.models import Word2Vec
# Model parameters
num_features = 300 # Word vector dimensionality
min_word_count = 40 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
%%time
# Train the model
print("Training the model...")
model = Word2Vec(sentences, workers=num_workers, \
    size=num_features, min_count = min_word_count, \
    window = context, sample = downsampling)
print 'Saving the model...'
model.init_sims(replace=True)
model_name = "300features_40minwords_10context"
model.save(model_name)
model.doesnt_match("man woman child kitchen".split())
model.doesnt_match("france england germany berlin".split())
model.doesnt_match("paris berlin london austria".split())
model.most_similar("man")
model.most_similar("queen")
model.most_similar("awful")
def makeFeatureVec(words, model, num_features):
    '''
    Average all of the word vectors in a given paragraph.
    '''
    featureVec = np.zeros((num_features,), dtype="float32")
    nwords = 0.
    # index2word holds the model vocabulary; keep it in a set for fast lookup
    index2word_set = set(model.index2word)
    for word in words:
        if word in index2word_set:
            nwords = nwords + 1.
            featureVec = np.add(featureVec, model[word])
    # Take the average
    featureVec = np.divide(featureVec, nwords)
    return featureVec
def getAvgFeatureVecs(reviews, model, num_features):
    '''
    Given a list of texts, each a list of words, return the average
    word vector for each text.
    '''
    # Use an integer counter so it can serve as an array index
    counter = 0
    reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32")
    for review in reviews:
        if counter % 5000 == 0:
            print("Review %d of %d" % (counter, len(reviews)))
        reviewFeatureVecs[counter] = makeFeatureVec(review, model, \
            num_features)
        counter = counter + 1
    return reviewFeatureVecs
# Note: train_data/test_data hold space-joined strings, so split them back
# into word lists before averaging word vectors (iterating a string would
# yield characters, not words)
%time trainDataVecs = getAvgFeatureVecs([r.split() for r in train_data], model, num_features)
%time testDataVecs = getAvgFeatureVecs([r.split() for r in test_data], model, num_features)
from sklearn.naive_bayes import GaussianNB as GNB
model_GNB = GNB()
model_GNB.fit(trainDataVecs, label)
from sklearn.cross_validation import cross_val_score
import numpy as np
print "高斯贝叶斯分类器10折交叉验证得分: ", np.mean(cross_val_score(model_GNB, trainDataVecs, label, cv=10, scoring='roc_auc'))
result = forest.predict( testDataVecs )
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
output.to_csv( "gnb_word2vec.csv", index=False, quoting=3 )
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier( n_estimators = 100, n_jobs=2)
print("Fitting a random forest to labeled training data...")
%time forest = forest.fit( trainDataVecs, label )
print "随机森林分类器10折交叉验证得分: ", np.mean(cross_val_score(forest, trainDataVecs, label, cv=10, scoring='roc_auc'))
# 测试集
result = forest.predict( testDataVecs )
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
output.to_csv( "rf_word2vec.csv", index=False, quoting=3 )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the datasets
Step2: Preprocess the data
Step3: Feature processing
Step4: Naive Bayes training
Step5: Submitting the final result to Kaggle gives an AUC of 0.85728, ranking around 300 (roughly the 50th percentile)
Step6: Submitting the final result to Kaggle gives an AUC of 0.88956, ranking around 260, an improvement over the previous Bayes model
Step7: Build the word2vec model
Step8: Preview the model
Step9: Use Word2vec features
Step10: Gaussian naive Bayes + Word2vec training
Step11: Judging from the validation results, this does not beat the TF-IDF-based multinomial Bayes model
|